[Internal] Parser: Adds Antlr Dependency (#1691)

* turned back on parser

* adding ANTLR files

* got it to build

* revert csproj

* revert

* revert

* added Component Detection

* adding notice file

* added component name

* updated cs proj

Co-authored-by: Samer Boshra <sboshra@microsoft.com>
Brandon Chong 2020-09-23 09:33:21 -07:00 committed by GitHub
Parent eb12cfe391
Commit 5f7dd88939
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
195 changed files with 33,430 additions and 2 deletions

View file

@@ -50,6 +50,10 @@
<Compile Remove="RuntimePerfCounters.cs" />
</ItemGroup>
<ItemGroup>
<Content Include="..\..\ThirdPartyNotice.txt" Link="ThirdPartyNotice.txt" />
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="Batch\HybridRowBatchSchemas.json">
<CopyToOutputDirectory>Never</CopyToOutputDirectory>
@@ -85,7 +89,6 @@
<ItemGroup>
<PackageReference Include="System.Collections.Immutable" Version="1.7.0" />
<PackageReference Include="System.Numerics.Vectors" Version="4.5.0" />
<PackageReference Include="Antlr4.Runtime.Standard" Version="4.8.0" />
<PackageReference Include="Microsoft.Bcl.AsyncInterfaces" Version="1.0.0" />
<PackageReference Include="Microsoft.VisualStudio.Threading.Analyzers" Version="16.0.102" PrivateAssets="All" />
<PackageReference Include="Newtonsoft.Json" Version="10.0.2" />

View file

@@ -0,0 +1,5 @@
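# Prepends the StyleCop-exclusion comment and the <auto-generated/> marker to the file passed as the first argument.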
$a = Get-Content $args[0]
$b = "// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis."
$c = "// <auto-generated/>"
$d = ""
Set-Content $args[0] -value $b, $c, $d, $a

View file

@@ -0,0 +1,97 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#if !PORTABLE
using Antlr4.Runtime.Sharpen;
using Encoding = System.Text.Encoding;
using File = System.IO.File;
namespace Antlr4.Runtime
{
#if COMPACT
using StreamReader = System.IO.StreamReader;
#endif
/// <summary>
/// This is an
/// <see cref="AntlrInputStream"/>
/// that is loaded from a file all at once
/// when you construct the object.
/// </summary>
internal class AntlrFileStream : AntlrInputStream
{
protected internal string fileName;
/// <exception cref="System.IO.IOException"/>
public AntlrFileStream(string fileName)
: this(fileName, null)
{
}
/// <exception cref="System.IO.IOException"/>
public AntlrFileStream(string fileName, Encoding encoding)
{
this.fileName = fileName;
Load(fileName, encoding);
}
/// <exception cref="System.IO.IOException"/>
public virtual void Load(string fileName, Encoding encoding)
{
if (fileName == null)
{
return;
}
string text;
#if !COMPACT
if (encoding != null)
text = File.ReadAllText(fileName, encoding);
else
text = File.ReadAllText(fileName);
#else
if (encoding != null)
text = ReadAllText(fileName, encoding);
else
text = ReadAllText(fileName);
#endif
data = text.ToCharArray();
n = data.Length;
}
public override string SourceName
{
get
{
return fileName;
}
}
#if COMPACT
private static string ReadAllText(string path)
{
using (var reader = new StreamReader(path))
{
return reader.ReadToEnd();
}
}
private static string ReadAllText(string path, Encoding encoding)
{
using (var reader = new StreamReader(path, encoding ?? Encoding.Default))
{
return reader.ReadToEnd();
}
}
#endif
}
}
#endif

View file

@@ -0,0 +1,332 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.IO;
using System.Text;
using Antlr4.Runtime;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
internal abstract class BaseInputCharStream : ICharStream
{
public const int ReadBufferSize = 1024;
public const int InitialBufferSize = 1024;
/// <summary>How many characters are actually in the buffer</summary>
protected internal int n;
/// <summary>0..n-1 index into string of next char</summary>
protected internal int p = 0;
/// <summary>What is name or source of this char stream?</summary>
public string name;
/// <summary>
/// Reset the stream so that it's in the same state it was
/// when the object was created *except* the data array is not
/// touched.
/// </summary>
/// <remarks>
/// Reset the stream so that it's in the same state it was
/// when the object was created *except* the data array is not
/// touched.
/// </remarks>
public virtual void Reset()
{
p = 0;
}
public virtual void Consume()
{
if (p >= n)
{
System.Diagnostics.Debug.Assert(LA(1) == IntStreamConstants.EOF);
throw new InvalidOperationException("cannot consume EOF");
}
else
{
p++;
}
}
//System.out.println("p moves to "+p+" (c='"+(char)data[p]+"')");
public virtual int LA(int i)
{
if (i == 0)
{
return 0;
}
// undefined
if (i < 0)
{
i++;
// e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
if ((p + i - 1) < 0)
{
return IntStreamConstants.EOF;
}
}
// invalid; no char before first char
if ((p + i - 1) >= n)
{
//System.out.println("char LA("+i+")=EOF; p="+p);
return IntStreamConstants.EOF;
}
//System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
//System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
return ValueAt(p + i - 1);
}
public virtual int Lt(int i)
{
return LA(i);
}
/// <summary>
/// Return the current input symbol index 0..n where n indicates the
/// last symbol has been read.
/// </summary>
/// <remarks>
/// Return the current input symbol index 0..n where n indicates the
/// last symbol has been read. The index is the index of char to
/// be returned from LA(1).
/// </remarks>
public virtual int Index
{
get
{
return p;
}
}
public virtual int Size
{
get
{
return n;
}
}
/// <summary>mark/release do nothing; we have entire buffer</summary>
public virtual int Mark()
{
return -1;
}
public virtual void Release(int marker)
{
}
/// <summary>
/// consume() ahead until p==index; can't just set p=index as we must
/// update line and charPositionInLine.
/// </summary>
/// <remarks>
/// consume() ahead until p==index; can't just set p=index as we must
/// update line and charPositionInLine. If we seek backwards, just set p
/// </remarks>
public virtual void Seek(int index)
{
if (index <= p)
{
p = index;
// just jump; don't update stream state (line, ...)
return;
}
// seek forward, consume until p hits index or n (whichever comes first)
index = Math.Min(index, n);
while (p < index)
{
Consume();
}
}
public virtual string GetText(Interval interval)
{
int start = interval.a;
int stop = interval.b;
if (stop >= n)
{
stop = n - 1;
}
int count = stop - start + 1;
if (start >= n)
{
return string.Empty;
}
return ConvertDataToString(start, count);
}
protected abstract int ValueAt(int i);
protected abstract string ConvertDataToString(int start, int count);
public override sealed string ToString()
{
return ConvertDataToString(0, n);
}
public virtual string SourceName
{
get
{
if (string.IsNullOrEmpty(name))
{
return IntStreamConstants.UnknownSourceName;
}
return name;
}
}
}
/// <summary>
/// Vacuum all input from a
/// <see cref="System.IO.TextReader"/>
/// /
/// <see cref="System.IO.Stream"/>
/// and then treat it
/// like a
/// <c>char[]</c>
/// buffer. Can also pass in a
/// <see cref="string"/>
/// or
/// <c>char[]</c>
/// to use.
/// <p>If you need encoding, pass in stream/reader with correct encoding.</p>
/// </summary>
internal class AntlrInputStream : BaseInputCharStream
{
/// <summary>The data being scanned</summary>
protected internal char[] data;
public AntlrInputStream()
{
}
/// <summary>Copy data in string to a local char array</summary>
public AntlrInputStream(string input)
{
this.data = input.ToCharArray();
this.n = input.Length;
}
/// <summary>This is the preferred constructor for strings as no data is copied</summary>
public AntlrInputStream(char[] data, int numberOfActualCharsInArray)
{
this.data = data;
this.n = numberOfActualCharsInArray;
}
/// <exception cref="System.IO.IOException"/>
public AntlrInputStream(TextReader r)
: this(r, InitialBufferSize, ReadBufferSize)
{
}
/// <exception cref="System.IO.IOException"/>
public AntlrInputStream(TextReader r, int initialSize)
: this(r, initialSize, ReadBufferSize)
{
}
/// <exception cref="System.IO.IOException"/>
public AntlrInputStream(TextReader r, int initialSize, int readChunkSize)
{
Load(r, initialSize, readChunkSize);
}
/// <exception cref="System.IO.IOException"/>
public AntlrInputStream(Stream input)
: this(new StreamReader(input), InitialBufferSize)
{
}
/// <exception cref="System.IO.IOException"/>
public AntlrInputStream(Stream input, int initialSize)
: this(new StreamReader(input), initialSize)
{
}
/// <exception cref="System.IO.IOException"/>
public AntlrInputStream(Stream input, int initialSize, int readChunkSize)
: this(new StreamReader(input), initialSize, readChunkSize)
{
}
/// <exception cref="System.IO.IOException"/>
public virtual void Load(TextReader r, int size, int readChunkSize)
{
if (r == null)
{
return;
}
data = r.ReadToEnd().ToCharArray();
n = data.Length;
}
protected override int ValueAt(int i)
{
return data[i];
}
protected override string ConvertDataToString(int start, int count)
{
// System.err.println("data: "+Arrays.toString(data)+", n="+n+
// ", start="+start+
// ", stop="+stop);
return new string(data, start, count);
}
}
/// <summary>
/// Alternative to
/// <see cref="AntlrInputStream"/>
/// which treats the input as a series of Unicode code points,
/// instead of a series of UTF-16 code units.
///
/// Use this if you need to parse input which potentially contains
/// Unicode values > U+FFFF.
/// </summary>
internal class CodePointCharStream : BaseInputCharStream
{
private int[] data;
public CodePointCharStream(string input)
{
this.data = new int[input.Length];
int dataIdx = 0;
for (int i = 0; i < input.Length; ) {
var codePoint = Char.ConvertToUtf32(input, i);
data[dataIdx++] = codePoint;
if (dataIdx > data.Length) {
Array.Resize(ref data, data.Length * 2);
}
i += codePoint <= 0xFFFF ? 1 : 2;
}
this.n = dataIdx;
}
protected override int ValueAt(int i)
{
return data[i];
}
protected override string ConvertDataToString(int start, int count)
{
var sb = new StringBuilder(count);
for (int i = start; i < start + count; i++) {
sb.Append(Char.ConvertFromUtf32(data[i]));
}
return sb.ToString();
}
}
}
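To make the distinction in the comments above concrete: AntlrInputStream stores UTF-16 code units while CodePointCharStream stores full code points, so a supplementary character takes two positions in the former and one in the latter. A minimal sketch (usable only within this assembly, since the vendored types are internal):

var utf16 = new AntlrInputStream("A\U0001F600B");         // the emoji is a surrogate pair in UTF-16
var codePoints = new CodePointCharStream("A\U0001F600B");
System.Console.WriteLine(utf16.Size);                      // 4: two code units for the emoji
System.Console.WriteLine(codePoints.Size);                 // 3: one code point per character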

View file

@@ -0,0 +1,265 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal class ATN
{
public const int INVALID_ALT_NUMBER = 0;
[NotNull]
public readonly IList<ATNState> states = new List<ATNState>();
/// <summary>
/// Each subrule/rule is a decision point and we must track them so we
/// can go back later and build DFA predictors for them.
/// </summary>
/// <remarks>
/// Each subrule/rule is a decision point and we must track them so we
/// can go back later and build DFA predictors for them. This includes
/// all the rules, subrules, optional blocks, ()+, ()* etc...
/// </remarks>
[NotNull]
public readonly IList<DecisionState> decisionToState = new List<DecisionState>();
/// <summary>Maps from rule index to starting state number.</summary>
/// <remarks>Maps from rule index to starting state number.</remarks>
public RuleStartState[] ruleToStartState;
/// <summary>Maps from rule index to stop state number.</summary>
/// <remarks>Maps from rule index to stop state number.</remarks>
public RuleStopState[] ruleToStopState;
[NotNull]
public readonly IDictionary<string, TokensStartState> modeNameToStartState = new Dictionary<string, TokensStartState>();
/// <summary>The type of the ATN.</summary>
/// <remarks>The type of the ATN.</remarks>
public readonly ATNType grammarType;
/// <summary>The maximum value for any symbol recognized by a transition in the ATN.</summary>
/// <remarks>The maximum value for any symbol recognized by a transition in the ATN.</remarks>
public readonly int maxTokenType;
/// <summary>For lexer ATNs, this maps the rule index to the resulting token type.</summary>
/// <remarks>
/// For lexer ATNs, this maps the rule index to the resulting token type.
/// For parser ATNs, this maps the rule index to the generated bypass token
/// type if the
/// <see cref="ATNDeserializationOptions.GenerateRuleBypassTransitions()"/>
/// deserialization option was specified; otherwise, this is
/// <see langword="null"/>
/// .
/// </remarks>
public int[] ruleToTokenType;
/// <summary>
/// For lexer ATNs, this is an array of
/// <see cref="ILexerAction"/>
/// objects which may
/// be referenced by action transitions in the ATN.
/// </summary>
public ILexerAction[] lexerActions;
[NotNull]
public readonly IList<TokensStartState> modeToStartState = new List<TokensStartState>();
private readonly PredictionContextCache contextCache = new PredictionContextCache();
[NotNull]
public DFA[] decisionToDFA = new DFA[0];
[NotNull]
public DFA[] modeToDFA = new DFA[0];
protected internal readonly ConcurrentDictionary<int, int> LL1Table = new ConcurrentDictionary<int, int>();
/// <summary>Used for runtime deserialization of ATNs from strings</summary>
public ATN(ATNType grammarType, int maxTokenType)
{
this.grammarType = grammarType;
this.maxTokenType = maxTokenType;
}
public virtual PredictionContext GetCachedContext(PredictionContext context)
{
return PredictionContext.GetCachedContext(context, contextCache, new PredictionContext.IdentityHashMap());
}
/// <summary>
/// Compute the set of valid tokens that can occur starting in state
/// <paramref name="s"/>
/// .
/// If
/// <paramref name="ctx"/>
/// is
/// <see cref="PredictionContext.EMPTY"/>
/// , the set of tokens will not include what can follow
/// the rule surrounding
/// <paramref name="s"/>
/// . In other words, the set will be
/// restricted to tokens reachable staying within
/// <paramref name="s"/>
/// 's rule.
/// </summary>
[return: NotNull]
public virtual IntervalSet NextTokens(ATNState s, RuleContext ctx)
{
LL1Analyzer anal = new LL1Analyzer(this);
IntervalSet next = anal.Look(s, ctx);
return next;
}
/// <summary>
/// Compute the set of valid tokens that can occur starting in
/// <paramref name="s"/>
/// and
/// staying in same rule.
/// <see cref="TokenConstants.EPSILON"/>
/// is in set if we reach end of
/// rule.
/// </summary>
[return: NotNull]
public virtual IntervalSet NextTokens(ATNState s)
{
if (s.nextTokenWithinRule != null)
{
return s.nextTokenWithinRule;
}
s.nextTokenWithinRule = NextTokens(s, null);
s.nextTokenWithinRule.SetReadonly(true);
return s.nextTokenWithinRule;
}
public virtual void AddState(ATNState state)
{
if (state != null)
{
state.atn = this;
state.stateNumber = states.Count;
}
states.Add(state);
}
public virtual void RemoveState(ATNState state)
{
states[state.stateNumber] = null;
}
// just free mem, don't shift states in list
public virtual void DefineMode(string name, TokensStartState s)
{
modeNameToStartState[name] = s;
modeToStartState.Add(s);
modeToDFA = Arrays.CopyOf(modeToDFA, modeToStartState.Count);
modeToDFA[modeToDFA.Length - 1] = new DFA(s);
DefineDecisionState(s);
}
public virtual int DefineDecisionState(DecisionState s)
{
decisionToState.Add(s);
s.decision = decisionToState.Count - 1;
decisionToDFA = Arrays.CopyOf(decisionToDFA, decisionToState.Count);
decisionToDFA[decisionToDFA.Length - 1] = new DFA(s, s.decision);
return s.decision;
}
public virtual DecisionState GetDecisionState(int decision)
{
if (decisionToState.Count != 0)
{
return decisionToState[decision];
}
return null;
}
public virtual int NumberOfDecisions
{
get
{
return decisionToState.Count;
}
}
/// <summary>
/// Computes the set of input symbols which could follow ATN state number
/// <paramref name="stateNumber"/>
/// in the specified full
/// <paramref name="context"/>
/// . This method
/// considers the complete parser context, but does not evaluate semantic
/// predicates (i.e. all predicates encountered during the calculation are
/// assumed true). If a path in the ATN exists from the starting state to the
/// <see cref="RuleStopState"/>
/// of the outermost context without matching any
/// symbols,
/// <see cref="TokenConstants.EOF"/>
/// is added to the returned set.
/// <p>If
/// <paramref name="context"/>
/// is
/// <see langword="null"/>
/// , it is treated as
/// <see cref="ParserRuleContext.EmptyContext"/>
/// .</p>
/// </summary>
/// <param name="stateNumber">the ATN state number</param>
/// <param name="context">the full parse context</param>
/// <returns>
/// The set of potentially valid input symbols which could follow the
/// specified state in the specified context.
/// </returns>
/// <exception cref="System.ArgumentException">
/// if the ATN does not contain a state with
/// number
/// <paramref name="stateNumber"/>
/// </exception>
[return: NotNull]
public virtual IntervalSet GetExpectedTokens(int stateNumber, RuleContext context)
{
if (stateNumber < 0 || stateNumber >= states.Count)
{
throw new ArgumentException("Invalid state number.");
}
RuleContext ctx = context;
ATNState s = states[stateNumber];
IntervalSet following = NextTokens(s);
if (!following.Contains(TokenConstants.EPSILON))
{
return following;
}
IntervalSet expected = new IntervalSet();
expected.AddAll(following);
expected.Remove(TokenConstants.EPSILON);
while (ctx != null && ctx.invokingState >= 0 && following.Contains(TokenConstants.EPSILON))
{
ATNState invokingState = states[ctx.invokingState];
RuleTransition rt = (RuleTransition)invokingState.Transition(0);
following = NextTokens(rt.followState);
expected.AddAll(following);
expected.Remove(TokenConstants.EPSILON);
ctx = ctx.Parent;
}
if (following.Contains(TokenConstants.EPSILON))
{
expected.Add(TokenConstants.EOF);
}
return expected;
}
}
}
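As a hedged illustration of how the GetExpectedTokens walk above is typically consumed (parser stands for any generated parser instance; the Atn, State, and Context members are assumed from the standard recognizer surface and are not part of this diff):

// In an error path: ask the ATN which token types could legally appear next,
// taking the full rule-invocation context into account.
ATN atn = parser.Atn;
IntervalSet expected = atn.GetExpectedTokens(parser.State, parser.Context);
System.Console.WriteLine("expecting token types: " + expected);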

View file

@@ -0,0 +1,249 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Text;
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/** A tuple: (ATN state, predicted alt, syntactic, semantic context).
* The syntactic context is a graph-structured stack node whose
* path(s) to the root is the rule invocation(s)
* chain used to arrive at the state. The semantic context is
* the tree of semantic predicates encountered before reaching
* an ATN state.
*/
internal class ATNConfig
{
/**
* This field stores the bit mask for implementing the
* {@link #isPrecedenceFilterSuppressed} property as a bit within the
* existing {@link #reachesIntoOuterContext} field.
*/
private static readonly int SUPPRESS_PRECEDENCE_FILTER = 0x40000000;
/** The ATN state associated with this configuration */
public readonly ATNState state;
/** What alt (or lexer rule) is predicted by this configuration */
public readonly int alt;
/** The stack of invoking states leading to the rule/states associated
* with this config. We track only those contexts pushed during
* execution of the ATN simulator.
*/
public PredictionContext context;
/**
* We cannot execute predicates dependent upon local context unless
* we know for sure we are in the correct context. Because there is
* no way to do this efficiently, we simply cannot evaluate
* dependent predicates unless we are in the rule that initially
* invokes the ATN simulator.
*
* <p>
* closure() tracks the depth of how far we dip into the outer context:
* depth &gt; 0. Note that it may not be totally accurate depth since I
* don't ever decrement. TODO: make it a boolean then</p>
*
* <p>
* For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method
* is also backed by this field. Since the field is publicly accessible, the
* highest bit which would not cause the value to become negative is used to
* store this field. This choice minimizes the risk that code which only
* compares this value to 0 would be affected by the new purpose of the
* flag. It also ensures the performance of the existing {@link ATNConfig}
* constructors as well as certain operations like
* {@link ATNConfigSet#add(ATNConfig, DoubleKeyMap)} method are
* <em>completely</em> unaffected by the change.</p>
*/
public int reachesIntoOuterContext;
public readonly SemanticContext semanticContext;
public ATNConfig(ATNConfig old)
{ // dup
this.state = old.state;
this.alt = old.alt;
this.context = old.context;
this.semanticContext = old.semanticContext;
this.reachesIntoOuterContext = old.reachesIntoOuterContext;
}
public ATNConfig(ATNState state,
int alt,
PredictionContext context)
: this(state, alt, context, SemanticContext.NONE)
{
}
public ATNConfig(ATNState state,
int alt,
PredictionContext context,
SemanticContext semanticContext)
{
this.state = state;
this.alt = alt;
this.context = context;
this.semanticContext = semanticContext;
}
public ATNConfig(ATNConfig c, ATNState state)
: this(c, state, c.context, c.semanticContext)
{
}
public ATNConfig(ATNConfig c, ATNState state,
SemanticContext semanticContext)
: this(c, state, c.context, semanticContext)
{
}
public ATNConfig(ATNConfig c,
SemanticContext semanticContext)
: this(c, c.state, c.context, semanticContext)
{
}
public ATNConfig(ATNConfig c, ATNState state,
PredictionContext context)
: this(c, state, context, c.semanticContext)
{
}
public ATNConfig(ATNConfig c, ATNState state,
PredictionContext context,
SemanticContext semanticContext)
{
this.state = state;
this.alt = c.alt;
this.context = context;
this.semanticContext = semanticContext;
this.reachesIntoOuterContext = c.reachesIntoOuterContext;
}
/**
* This method gets the value of the {@link #reachesIntoOuterContext} field
* as it existed prior to the introduction of the
* {@link #isPrecedenceFilterSuppressed} method.
*/
public int OuterContextDepth
{
get
{
return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER;
}
}
public bool IsPrecedenceFilterSuppressed
{
get
{
return (reachesIntoOuterContext & SUPPRESS_PRECEDENCE_FILTER) != 0;
}
}
public void SetPrecedenceFilterSuppressed(bool value)
{
if (value)
{
this.reachesIntoOuterContext |= SUPPRESS_PRECEDENCE_FILTER;
}
else {
this.reachesIntoOuterContext &= ~SUPPRESS_PRECEDENCE_FILTER;
}
}
/** An ATN configuration is equal to another if both have
* the same state, they predict the same alternative, and
* syntactic/semantic contexts are the same.
*/
public override bool Equals(Object o)
{
if (!(o is ATNConfig)) {
return false;
}
return this.Equals((ATNConfig)o);
}
public virtual bool Equals(ATNConfig other)
{
if (this == other)
{
return true;
}
else if (other == null)
{
return false;
}
return this.state.stateNumber == other.state.stateNumber
&& this.alt == other.alt
&& (this.context == other.context || (this.context != null && this.context.Equals(other.context)))
&& this.semanticContext.Equals(other.semanticContext)
&& this.IsPrecedenceFilterSuppressed == other.IsPrecedenceFilterSuppressed;
}
public override int GetHashCode()
{
int hashCode = MurmurHash.Initialize(7);
hashCode = MurmurHash.Update(hashCode, state.stateNumber);
hashCode = MurmurHash.Update(hashCode, alt);
hashCode = MurmurHash.Update(hashCode, context);
hashCode = MurmurHash.Update(hashCode, semanticContext);
hashCode = MurmurHash.Finish(hashCode, 4);
return hashCode;
}
public override String ToString()
{
return ToString(null, true);
}
public String ToString(IRecognizer recog, bool showAlt)
{
StringBuilder buf = new StringBuilder();
// if ( state.ruleIndex>=0 ) {
// if ( recog!=null ) buf.append(recog.getRuleNames()[state.ruleIndex]+":");
// else buf.append(state.ruleIndex+":");
// }
buf.Append('(');
buf.Append(state);
if (showAlt)
{
buf.Append(",");
buf.Append(alt);
}
if (context != null)
{
buf.Append(",[");
buf.Append(context.ToString());
buf.Append("]");
}
if (semanticContext != null && semanticContext != SemanticContext.NONE)
{
buf.Append(",");
buf.Append(semanticContext);
}
if (OuterContextDepth > 0)
{
buf.Append(",up=").Append(OuterContextDepth);
}
buf.Append(')');
return buf.ToString();
}
}
}
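A small worked example of the bit packing described in the reachesIntoOuterContext comment above: the outer-context depth and the precedence-filter-suppressed flag share one int, and OuterContextDepth simply masks the flag bit back out.

int reaches = 3;                                // depth recorded by closure()
reaches |= 0x40000000;                          // what SetPrecedenceFilterSuppressed(true) does
int depth = reaches & ~0x40000000;              // what OuterContextDepth returns: still 3
bool suppressed = (reaches & 0x40000000) != 0;  // true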

View file

@@ -0,0 +1,426 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal class ATNConfigSet
{
/** Indicates that the set of configurations is read-only. Do not
* allow any code to manipulate the set; DFA states will point at
* the sets and they must not change. This does not protect the other
* fields; in particular, conflictingAlts is set after
* we've made this readonly.
*/
protected bool readOnly = false;
/**
* All configs but hashed by (s, i, _, pi) not including context. Wiped out
* when we go readonly as this set becomes a DFA state.
*/
public ConfigHashSet configLookup;
/** Track the elements as they are added to the set; supports get(i) */
public ArrayList<ATNConfig> configs = new ArrayList<ATNConfig>(7);
// TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation
// TODO: can we track conflicts as they are added to save scanning configs later?
public int uniqueAlt;
/** Currently this is only used when we detect SLL conflict; this does
* not necessarily represent the ambiguous alternatives. In fact,
* I should also point out that this seems to include predicated alternatives
* that have predicates that evaluate to false. Computed in computeTargetState().
*/
public BitSet conflictingAlts;
// Used in parser and lexer. In lexer, it indicates we hit a pred
// while computing a closure operation. Don't make a DFA state from this.
public bool hasSemanticContext;
public bool dipsIntoOuterContext;
/** Indicates that this configuration set is part of a full context
* LL prediction. It will be used to determine how to merge $. With SLL
* it's a wildcard whereas it is not for LL context merge.
*/
public readonly bool fullCtx;
private int cachedHashCode = -1;
public ATNConfigSet(bool fullCtx)
{
configLookup = new ConfigHashSet();
this.fullCtx = fullCtx;
}
public ATNConfigSet()
: this(true)
{
}
public ATNConfigSet(ATNConfigSet old)
: this(old.fullCtx)
{
AddAll(old.configs);
this.uniqueAlt = old.uniqueAlt;
this.conflictingAlts = old.conflictingAlts;
this.hasSemanticContext = old.hasSemanticContext;
this.dipsIntoOuterContext = old.dipsIntoOuterContext;
}
public bool Add(ATNConfig config)
{
return Add(config, null);
}
/**
* Adding a new config means merging contexts with existing configs for
* {@code (s, i, pi, _)}, where {@code s} is the
* {@link ATNConfig#state}, {@code i} is the {@link ATNConfig#alt}, and
* {@code pi} is the {@link ATNConfig#semanticContext}. We use
* {@code (s,i,pi)} as key.
*
* <p>This method updates {@link #dipsIntoOuterContext} and
* {@link #hasSemanticContext} when necessary.</p>
*/
public bool Add(ATNConfig config, MergeCache mergeCache)
{
if (readOnly)
throw new Exception("This set is readonly");
if (config.semanticContext != SemanticContext.NONE)
{
hasSemanticContext = true;
}
if (config.OuterContextDepth > 0)
{
dipsIntoOuterContext = true;
}
ATNConfig existing = configLookup.GetOrAdd(config);
if (existing == config)
{ // we added this new one
cachedHashCode = -1;
configs.Add(config); // track order here
return true;
}
// a previous (s,i,pi,_), merge with it and save result
bool rootIsWildcard = !fullCtx;
PredictionContext merged = PredictionContext.Merge(existing.context, config.context, rootIsWildcard, mergeCache);
// no need to check for existing.context, config.context in cache
// since only way to create new graphs is "call rule" and here. We
// cache at both places.
existing.reachesIntoOuterContext = Math.Max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext);
// make sure to preserve the precedence filter suppression during the merge
if (config.IsPrecedenceFilterSuppressed)
{
existing.SetPrecedenceFilterSuppressed(true);
}
existing.context = merged; // replace context; no need to alt mapping
return true;
}
/** Return a List holding list of configs */
public List<ATNConfig> Elements
{
get
{
return configs;
}
}
public HashSet<ATNState> GetStates()
{
HashSet<ATNState> states = new HashSet<ATNState>();
foreach (ATNConfig c in configs)
{
states.Add(c.state);
}
return states;
}
/**
* Gets the complete set of represented alternatives for the configuration
* set.
*
* @return the set of represented alternatives in this configuration set
*
* @since 4.3
*/
public BitSet GetAlts()
{
BitSet alts = new BitSet();
foreach (ATNConfig config in configs)
{
alts.Set(config.alt);
}
return alts;
}
public List<SemanticContext> GetPredicates()
{
List<SemanticContext> preds = new List<SemanticContext>();
foreach (ATNConfig c in configs)
{
if (c.semanticContext != SemanticContext.NONE)
{
preds.Add(c.semanticContext);
}
}
return preds;
}
public ATNConfig Get(int i) { return configs[i]; }
public void OptimizeConfigs(ATNSimulator interpreter)
{
if (readOnly)
throw new Exception("This set is readonly");
if (configLookup.Count == 0)
return;
foreach (ATNConfig config in configs)
{
// int before = PredictionContext.getAllContextNodes(config.context).size();
config.context = interpreter.getCachedContext(config.context);
// int after = PredictionContext.getAllContextNodes(config.context).size();
// System.out.println("configs "+before+"->"+after);
}
}
public bool AddAll(ICollection<ATNConfig> coll)
{
foreach (ATNConfig c in coll) Add(c);
return false;
}
public override bool Equals(Object o)
{
if (o == this)
{
return true;
}
else if (!(o is ATNConfigSet))
{
return false;
}
// System.out.print("equals " + this + ", " + o+" = ");
ATNConfigSet other = (ATNConfigSet)o;
bool same = configs != null &&
configs.Equals(other.configs) && // includes stack context
this.fullCtx == other.fullCtx &&
this.uniqueAlt == other.uniqueAlt &&
this.conflictingAlts == other.conflictingAlts &&
this.hasSemanticContext == other.hasSemanticContext &&
this.dipsIntoOuterContext == other.dipsIntoOuterContext;
// System.out.println(same);
return same;
}
public override int GetHashCode()
{
if (IsReadOnly)
{
if (cachedHashCode == -1)
{
cachedHashCode = configs.GetHashCode();
}
return cachedHashCode;
}
return configs.GetHashCode();
}
public int Count
{
get
{
return configs.Count;
}
}
public bool Empty
{
get
{
return configs.Count == 0;
}
}
public bool Contains(Object o)
{
if (configLookup == null)
{
throw new Exception("This method is not implemented for readonly sets.");
}
return configLookup.ContainsKey((ATNConfig)o);
}
public void Clear()
{
if (readOnly)
throw new Exception("This set is readonly");
configs.Clear();
cachedHashCode = -1;
configLookup.Clear();
}
public bool IsReadOnly
{
get
{
return readOnly;
}
set
{
this.readOnly = value;
configLookup = null; // can't mod, no need for lookup cache
}
}
public override String ToString()
{
StringBuilder buf = new StringBuilder();
buf.Append('[');
List<ATNConfig> cfgs = Elements;
if (cfgs.Count > 0)
{
foreach (ATNConfig c in cfgs)
{
buf.Append(c.ToString());
buf.Append(", ");
}
buf.Length = buf.Length - 2;
}
buf.Append(']');
if (hasSemanticContext)
buf.Append(",hasSemanticContext=")
.Append(hasSemanticContext);
if (uniqueAlt != ATN.INVALID_ALT_NUMBER)
buf.Append(",uniqueAlt=")
.Append(uniqueAlt);
if (conflictingAlts != null)
buf.Append(",conflictingAlts=")
.Append(conflictingAlts);
if (dipsIntoOuterContext)
buf.Append(",dipsIntoOuterContext");
return buf.ToString();
}
}
internal class OrderedATNConfigSet : ATNConfigSet
{
public OrderedATNConfigSet()
{
this.configLookup = new LexerConfigHashSet();
}
internal class LexerConfigHashSet : ConfigHashSet
{
public LexerConfigHashSet()
: base(new ObjectEqualityComparator())
{
}
}
}
internal class ObjectEqualityComparator : IEqualityComparer<ATNConfig>
{
public int GetHashCode(ATNConfig o)
{
if (o == null)
return 0;
else
return o.GetHashCode();
}
public bool Equals(ATNConfig a, ATNConfig b)
{
if (a == b) return true;
if (a == null || b == null) return false;
return a.Equals(b);
}
}
/**
* The reason that we need this is because we don't want the hash map to use
* the standard hash code and equals. We need all configurations with the same
* {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles
* the number of objects associated with ATNConfigs. The other solution is to
* use a hash table that lets us specify the equals/hashcode operation.
*/
internal class ConfigHashSet : Dictionary<ATNConfig, ATNConfig>
{
public ConfigHashSet(IEqualityComparer<ATNConfig> comparer)
: base(comparer)
{
}
public ConfigHashSet()
: base(new ConfigEqualityComparator())
{
}
public ATNConfig GetOrAdd(ATNConfig config)
{
ATNConfig existing;
if (this.TryGetValue(config, out existing))
return existing;
else
{
this.Put(config, config);
return config;
}
}
}
internal class ConfigEqualityComparator : IEqualityComparer<ATNConfig>
{
public int GetHashCode(ATNConfig o)
{
int hashCode = 7;
hashCode = 31 * hashCode + o.state.stateNumber;
hashCode = 31 * hashCode + o.alt;
hashCode = 31 * hashCode + o.semanticContext.GetHashCode();
return hashCode;
}
public bool Equals(ATNConfig a, ATNConfig b)
{
if (a == b) return true;
if (a == null || b == null) return false;
return a.state.stateNumber == b.state.stateNumber
&& a.alt == b.alt
&& a.semanticContext.Equals(b.semanticContext);
}
}
}
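To make the keying rule above concrete: ConfigEqualityComparator deliberately ignores the prediction context, so two configurations that differ only in context compare equal, and ATNConfigSet.Add merges their contexts instead of storing both. A sketch with placeholder objects (stateA, contextX, and contextY stand in for real ATN fixtures):

var comparer = new ConfigEqualityComparator();
var c1 = new ATNConfig(stateA, 1, contextX);   // same (state, alt, semantic context)...
var c2 = new ATNConfig(stateA, 1, contextY);   // ...but a different prediction context
bool sameKey = comparer.Equals(c1, c2);        // true: context is not part of the key
// so configSet.Add(c2) merges contextY into c1's context rather than adding a new entry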

View file

@@ -0,0 +1,119 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <author>Sam Harwell</author>
internal class ATNDeserializationOptions
{
private static readonly Antlr4.Runtime.Atn.ATNDeserializationOptions defaultOptions;
static ATNDeserializationOptions()
{
defaultOptions = new Antlr4.Runtime.Atn.ATNDeserializationOptions();
defaultOptions.MakeReadOnly();
}
private bool readOnly;
private bool verifyATN;
private bool generateRuleBypassTransitions;
private bool optimize;
public ATNDeserializationOptions()
{
this.verifyATN = true;
this.generateRuleBypassTransitions = false;
this.optimize = true;
}
public ATNDeserializationOptions(Antlr4.Runtime.Atn.ATNDeserializationOptions options)
{
this.verifyATN = options.verifyATN;
this.generateRuleBypassTransitions = options.generateRuleBypassTransitions;
this.optimize = options.optimize;
}
[NotNull]
public static Antlr4.Runtime.Atn.ATNDeserializationOptions Default
{
get
{
return defaultOptions;
}
}
public bool IsReadOnly
{
get
{
return readOnly;
}
}
public void MakeReadOnly()
{
readOnly = true;
}
public bool VerifyAtn
{
get
{
return verifyATN;
}
set
{
bool verifyATN = value;
ThrowIfReadOnly();
this.verifyATN = verifyATN;
}
}
public bool GenerateRuleBypassTransitions
{
get
{
return generateRuleBypassTransitions;
}
set
{
bool generateRuleBypassTransitions = value;
ThrowIfReadOnly();
this.generateRuleBypassTransitions = generateRuleBypassTransitions;
}
}
public bool Optimize
{
get
{
return optimize;
}
set
{
bool optimize = value;
ThrowIfReadOnly();
this.optimize = optimize;
}
}
protected internal virtual void ThrowIfReadOnly()
{
if (IsReadOnly)
{
throw new InvalidOperationException("The object is read only.");
}
}
}
}
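Because the static constructor above freezes Default with MakeReadOnly, callers that need different deserialization settings copy it first; a minimal sketch:

// Default is read-only; copy it, then adjust the copy.
var options = new ATNDeserializationOptions(ATNDeserializationOptions.Default);
options.GenerateRuleBypassTransitions = true;   // setting this on Default would throw
options.VerifyAtn = false;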

Diff not shown because of its large size.

View file

@@ -0,0 +1,108 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal abstract class ATNSimulator
{
/** Must distinguish between missing edge and edge we know leads nowhere */
public static readonly DFAState ERROR = InitERROR();
static DFAState InitERROR()
{
DFAState state = new DFAState(new ATNConfigSet());
state.stateNumber = Int32.MaxValue;
return state;
}
public readonly ATN atn;
/** The context cache maps all PredictionContext objects that are equals()
* to a single cached copy. This cache is shared across all contexts
* in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
* to use only cached nodes/graphs in addDFAState(). We don't want to
* fill this during closure() since there are lots of contexts that
* pop up but are not used ever again. It also greatly slows down closure().
*
* <p>This cache makes a huge difference in memory and a little bit in speed.
* For the Java grammar on java.*, it dropped the memory requirements
* at the end from 25M to 16M. We don't store any of the full context
* graphs in the DFA because they are limited to local context only,
* but apparently there's a lot of repetition there as well. We optimize
* the config contexts before storing the config set in the DFA states
* by literally rebuilding them with cached subgraphs only.</p>
*
* <p>I tried a cache for use during closure operations, that was
* whacked after each adaptivePredict(). It cost a little bit
* more time I think and doesn't save on the overall footprint
* so it's not worth the complexity.</p>
*/
protected readonly PredictionContextCache sharedContextCache;
public ATNSimulator(ATN atn, PredictionContextCache sharedContextCache)
{
this.atn = atn;
this.sharedContextCache = sharedContextCache;
}
public abstract void Reset();
/**
* Clear the DFA cache used by the current instance. Since the DFA cache may
* be shared by multiple ATN simulators, this method may affect the
* performance (but not accuracy) of other parsers which are being used
* concurrently.
*
* @throws UnsupportedOperationException if the current instance does not
* support clearing the DFA.
*
* @since 4.3
*/
public virtual void ClearDFA()
{
throw new Exception("This ATN simulator does not support clearing the DFA.");
}
protected void ConsoleWriteLine(string format, params object[] arg)
{
#if !PORTABLE
System.Console.WriteLine(format, arg);
#endif
}
public PredictionContextCache getSharedContextCache()
{
return sharedContextCache;
}
public PredictionContext getCachedContext(PredictionContext context)
{
if (sharedContextCache == null) return context;
lock (sharedContextCache)
{
PredictionContext.IdentityHashMap visited =
new PredictionContext.IdentityHashMap();
return PredictionContext.GetCachedContext(context,
sharedContextCache,
visited);
}
}
}
}

View file

@@ -0,0 +1,196 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal abstract class ATNState
{
public const int InitialNumTransitions = 4;
public static readonly ReadOnlyCollection<string> serializationNames = new ReadOnlyCollection<string>(Arrays.AsList("INVALID", "BASIC", "RULE_START", "BLOCK_START", "PLUS_BLOCK_START", "STAR_BLOCK_START", "TOKEN_START", "RULE_STOP", "BLOCK_END", "STAR_LOOP_BACK", "STAR_LOOP_ENTRY", "PLUS_LOOP_BACK", "LOOP_END"));
public const int InvalidStateNumber = -1;
public ATN atn = null;
public int stateNumber = InvalidStateNumber;
public int ruleIndex;
public bool epsilonOnlyTransitions = false;
protected internal readonly List<Transition> transitions = new List<Transition>(InitialNumTransitions);
protected internal List<Transition> optimizedTransitions;
public IntervalSet nextTokenWithinRule;
public virtual int NonStopStateNumber
{
get
{
return stateNumber;
}
}
public override int GetHashCode()
{
return stateNumber;
}
public override bool Equals(object o)
{
return o==this ||
(o is ATNState && stateNumber == ((ATNState)o).stateNumber);
}
public virtual bool IsNonGreedyExitState
{
get
{
return false;
}
}
public override string ToString()
{
return stateNumber.ToString();
}
public virtual Transition[] TransitionsArray
{
get
{
return transitions.ToArray();
}
}
public virtual int NumberOfTransitions
{
get
{
return transitions.Count;
}
}
public virtual void AddTransition(Antlr4.Runtime.Atn.Transition e)
{
AddTransition(transitions.Count, e);
}
public virtual void AddTransition(int index, Antlr4.Runtime.Atn.Transition e)
{
if (transitions.Count == 0)
{
epsilonOnlyTransitions = e.IsEpsilon;
}
else
{
if (epsilonOnlyTransitions != e.IsEpsilon)
{
#if !PORTABLE
System.Console.Error.WriteLine("ATN state {0} has both epsilon and non-epsilon transitions.", stateNumber);
#endif
epsilonOnlyTransitions = false;
}
}
transitions.Insert(index, e);
}
public virtual Antlr4.Runtime.Atn.Transition Transition(int i)
{
return transitions[i];
}
public virtual void SetTransition(int i, Antlr4.Runtime.Atn.Transition e)
{
transitions[i] = e;
}
public virtual void RemoveTransition(int index)
{
transitions.RemoveAt(index);
}
public abstract Antlr4.Runtime.Atn.StateType StateType
{
get;
}
public bool OnlyHasEpsilonTransitions
{
get
{
return epsilonOnlyTransitions;
}
}
public virtual void SetRuleIndex(int ruleIndex)
{
this.ruleIndex = ruleIndex;
}
public virtual bool IsOptimized
{
get
{
return optimizedTransitions != transitions;
}
}
public virtual int NumberOfOptimizedTransitions
{
get
{
return optimizedTransitions.Count;
}
}
public virtual Antlr4.Runtime.Atn.Transition GetOptimizedTransition(int i)
{
return optimizedTransitions[i];
}
public virtual void AddOptimizedTransition(Antlr4.Runtime.Atn.Transition e)
{
if (!IsOptimized)
{
optimizedTransitions = new List<Antlr4.Runtime.Atn.Transition>();
}
optimizedTransitions.Add(e);
}
public virtual void SetOptimizedTransition(int i, Antlr4.Runtime.Atn.Transition e)
{
if (!IsOptimized)
{
throw new InvalidOperationException();
}
optimizedTransitions[i] = e;
}
public virtual void RemoveOptimizedTransition(int i)
{
if (!IsOptimized)
{
throw new InvalidOperationException();
}
optimizedTransitions.RemoveAt(i);
}
public ATNState()
{
optimizedTransitions = transitions;
}
}
}

View file

@@ -0,0 +1,21 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>Represents the type of recognizer an ATN applies to.</summary>
/// <remarks>Represents the type of recognizer an ATN applies to.</remarks>
/// <author>Sam Harwell</author>
internal enum ATNType
{
Lexer,
Parser
}
}

View file

@@ -0,0 +1,21 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <author>Sam Harwell</author>
internal abstract class AbstractPredicateTransition : Transition
{
public AbstractPredicateTransition(ATNState target)
: base(target)
{
}
}
}

View file

@@ -0,0 +1,62 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal sealed class ActionTransition : Transition
{
public readonly int ruleIndex;
public readonly int actionIndex;
public readonly bool isCtxDependent;
public ActionTransition(ATNState target, int ruleIndex)
: this(target, ruleIndex, -1, false)
{
}
public ActionTransition(ATNState target, int ruleIndex, int actionIndex, bool isCtxDependent)
: base(target)
{
// e.g., $i ref in action
this.ruleIndex = ruleIndex;
this.actionIndex = actionIndex;
this.isCtxDependent = isCtxDependent;
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.ACTION;
}
}
public override bool IsEpsilon
{
get
{
return true;
}
}
// we are to be ignored by analysis 'cept for predicates
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return false;
}
public override string ToString()
{
return "action_" + ruleIndex + ":" + actionIndex;
}
}
}

View file

@@ -0,0 +1,69 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>This class represents profiling event information for an ambiguity.</summary>
/// <remarks>
/// This class represents profiling event information for an ambiguity.
/// Ambiguities are decisions where a particular input resulted in an SLL
/// conflict, followed by LL prediction also reaching a conflict state
/// (indicating a true ambiguity in the grammar).
/// <p>
/// This event may be reported during SLL prediction in cases where the
/// conflicting SLL configuration set provides sufficient information to
/// determine that the SLL conflict is truly an ambiguity. For example, if none
/// of the ATN configurations in the conflicting SLL configuration set have
/// traversed a global follow transition (i.e.
/// <see cref="ATNConfig.reachesIntoOuterContext"/>
/// is
/// <see langword="false"/>
/// for all
/// configurations), then the result of SLL prediction for that input is known to
/// be equivalent to the result of LL prediction for that input.</p>
/// <p>
/// In some cases, the minimum represented alternative in the conflicting LL
/// configuration set is not equal to the minimum represented alternative in the
/// conflicting SLL configuration set. Grammars and inputs which result in this
/// scenario are unable to use
/// <see cref="PredictionMode.SLL"/>
/// , which in turn means
/// they cannot use the two-stage parsing strategy to improve parsing performance
/// for that input.</p>
/// </remarks>
/// <seealso cref="ParserATNSimulator.ReportAmbiguity(Antlr4.Runtime.Dfa.DFA, Antlr4.Runtime.Dfa.DFAState, int, int, bool, Antlr4.Runtime.Sharpen.BitSet, ATNConfigSet)"/>
/// <seealso cref="Antlr4.Runtime.IParserErrorListener.ReportAmbiguity(Antlr4.Runtime.Parser, Antlr4.Runtime.Dfa.DFA, int, int, bool, Antlr4.Runtime.Sharpen.BitSet, ATNConfigSet)"/>
/// <since>4.3</since>
internal class AmbiguityInfo : DecisionEventInfo
{
/// <summary>
/// Constructs a new instance of the
/// <see cref="AmbiguityInfo"/>
/// class with the
/// specified detailed ambiguity information.
/// </summary>
/// <param name="decision">The decision number</param>
/// <param name="state">
/// The final simulator state identifying the ambiguous
/// alternatives for the current input
/// </param>
/// <param name="input">The input token stream</param>
/// <param name="startIndex">The start index for the current prediction</param>
/// <param name="stopIndex">
/// The index at which the ambiguity was identified during
/// prediction
/// </param>
public AmbiguityInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex)
: base(decision, state, input, startIndex, stopIndex, state.useContext)
{
}
}
}

View file

@@ -0,0 +1,127 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Text;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
#pragma warning disable 0659 // 'class' overrides Object.Equals(object o) but does not override Object.GetHashCode()
internal class ArrayPredictionContext : PredictionContext
{
/** Parent can be null only if full ctx mode and we make an array
* from {@link #EMPTY} and non-empty. We merge {@link #EMPTY} by using null parent and
* returnState == {@link #EMPTY_RETURN_STATE}.
*/
public readonly PredictionContext[] parents;
/** Sorted for merge, no duplicates; if present,
* {@link #EMPTY_RETURN_STATE} is always last.
*/
public readonly int[] returnStates;
public ArrayPredictionContext(SingletonPredictionContext a)
: this(new PredictionContext[] { a.parent }, new int[] { a.returnState })
{
}
public ArrayPredictionContext(PredictionContext[] parents, int[] returnStates)
: base(CalculateHashCode(parents, returnStates))
{
// System.err.println("CREATE ARRAY: "+Arrays.toString(parents)+", "+Arrays.toString(returnStates));
this.parents = parents;
this.returnStates = returnStates;
}
public override bool IsEmpty
{
get
{
// since EMPTY_RETURN_STATE can only appear in the last position, we
// don't need to verify that size==1
return returnStates[0] == EMPTY_RETURN_STATE;
}
}
public override int Size
{
get
{
return returnStates.Length;
}
}
public override PredictionContext GetParent(int index)
{
return parents[index];
}
public override int GetReturnState(int index)
{
return returnStates[index];
}
// @Override
// public int findReturnState(int returnState) {
// return Arrays.binarySearch(returnStates, returnState);
// }
public override bool Equals(Object o)
{
if (this == o)
{
return true;
}
else if (!(o is ArrayPredictionContext))
{
return false;
}
if (this.GetHashCode() != o.GetHashCode())
{
return false; // can't be same if hash is different
}
ArrayPredictionContext a = (ArrayPredictionContext)o;
return Arrays.Equals(returnStates, a.returnStates) &&
Arrays.Equals(parents, a.parents);
}
public override String ToString()
{
if (IsEmpty)
return "[]";
StringBuilder buf = new StringBuilder();
buf.Append("[");
for (int i = 0; i < returnStates.Length; i++)
{
if (i > 0) buf.Append(", ");
if (returnStates[i] == EMPTY_RETURN_STATE)
{
buf.Append("$");
continue;
}
buf.Append(returnStates[i]);
if (parents[i] != null)
{
buf.Append(' ');
buf.Append(parents[i].ToString());
}
else {
buf.Append("null");
}
}
buf.Append("]");
return buf.ToString();
}
}
}

View file

@@ -0,0 +1,54 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>TODO: make all transitions sets? no, should remove set edges</summary>
internal sealed class AtomTransition : Transition
{
/// <summary>The token type or character value; or, signifies special label.</summary>
/// <remarks>The token type or character value; or, signifies special label.</remarks>
public readonly int token;
public AtomTransition(ATNState target, int token)
: base(target)
{
this.token = token;
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.ATOM;
}
}
public override IntervalSet Label
{
get
{
return IntervalSet.Of(token);
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return token == symbol;
}
[return: NotNull]
public override string ToString()
{
return token.ToString();
}
}
}

View file

@@ -0,0 +1,24 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <author>Sam Harwell</author>
internal sealed class BasicBlockStartState : BlockStartState
{
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.BlockStart;
}
}
}
}

View file

@@ -0,0 +1,24 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <author>Sam Harwell</author>
internal sealed class BasicState : ATNState
{
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.Basic;
}
}
}
}

View file

@@ -0,0 +1,30 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Terminal node of a simple
/// <c>(a|b|c)</c>
/// block.
/// </summary>
internal sealed class BlockEndState : ATNState
{
public BlockStartState startState;
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.BlockEnd;
}
}
}
}

View file

@@ -0,0 +1,22 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// The start of a regular
/// <c>(...)</c>
/// block.
/// </summary>
internal abstract class BlockStartState : DecisionState
{
public BlockEndState endState;
}
}

View file

@@ -0,0 +1,83 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>This class stores information about a configuration conflict.</summary>
/// <author>Sam Harwell</author>
internal class ConflictInfo
{
private readonly BitSet conflictedAlts;
private readonly bool exact;
public ConflictInfo(BitSet conflictedAlts, bool exact)
{
this.conflictedAlts = conflictedAlts;
this.exact = exact;
}
/// <summary>Gets the set of conflicting alternatives for the configuration set.</summary>
/// <remarks>Gets the set of conflicting alternatives for the configuration set.</remarks>
public BitSet ConflictedAlts
{
get
{
return conflictedAlts;
}
}
/// <summary>Gets whether or not the configuration conflict is an exact conflict.</summary>
/// <remarks>
/// Gets whether or not the configuration conflict is an exact conflict.
/// An exact conflict occurs when the prediction algorithm determines that
/// the represented alternatives for a particular configuration set cannot be
/// further reduced by consuming additional input. After reaching an exact
/// conflict during an SLL prediction, only switch to full-context prediction
/// could reduce the set of viable alternatives. In LL prediction, an exact
/// conflict indicates a true ambiguity in the input.
/// <p>
/// For the
/// <see cref="PredictionMode.LL_EXACT_AMBIG_DETECTION"/>
/// prediction mode,
/// accept states which are conflicting but not exact are treated as non-accept
/// states.</p>
/// </remarks>
public bool IsExact
{
get
{
return exact;
}
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
else
{
if (!(obj is Antlr4.Runtime.Atn.ConflictInfo))
{
return false;
}
}
Antlr4.Runtime.Atn.ConflictInfo other = (Antlr4.Runtime.Atn.ConflictInfo)obj;
return IsExact == other.IsExact && Utils.Equals(ConflictedAlts, other.ConflictedAlts);
}
public override int GetHashCode()
{
return ConflictedAlts.GetHashCode();
}
}
}
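
For illustration only (not part of this change), the equality contract documented above can be exercised with a short sketch: two ConflictInfo instances compare equal exactly when they report the same exactness and the same conflicting-alternative set. The helper assumes it lives in the same assembly, since the type is internal.

using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;

internal static class ConflictInfoExample
{
    internal static bool SameConflict()
    {
        // Alternatives 1 and 2 conflict for some hypothetical configuration set.
        BitSet alts = new BitSet();
        alts.Set(1);
        alts.Set(2);

        ConflictInfo first = new ConflictInfo(alts, exact: true);
        ConflictInfo second = new ConflictInfo(alts, exact: true);

        // Equals compares IsExact and the conflicting alternatives, not object identity.
        return first.Equals(second);
    }
}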

@ -0,0 +1,56 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>This class represents profiling event information for a context sensitivity.</summary>
/// <remarks>
/// This class represents profiling event information for a context sensitivity.
/// Context sensitivities are decisions where a particular input resulted in an
/// SLL conflict, but LL prediction produced a single unique alternative.
/// <p>
/// In some cases, the unique alternative identified by LL prediction is not
/// equal to the minimum represented alternative in the conflicting SLL
/// configuration set. Grammars and inputs which result in this scenario are
/// unable to use
/// <see cref="PredictionMode.SLL"/>
/// , which in turn means they cannot use
/// the two-stage parsing strategy to improve parsing performance for that
/// input.</p>
/// </remarks>
/// <seealso cref="ParserATNSimulator.ReportContextSensitivity(Dfa.DFA, int, ATNConfigSet, int, int)"/>
/// <seealso cref="Antlr4.Runtime.IParserErrorListener.ReportContextSensitivity(Antlr4.Runtime.Parser, Antlr4.Runtime.Dfa.DFA, int, int, int, SimulatorState)"/>
/// <since>4.3</since>
internal class ContextSensitivityInfo : DecisionEventInfo
{
/// <summary>
/// Constructs a new instance of the
/// <see cref="ContextSensitivityInfo"/>
/// class
/// with the specified detailed context sensitivity information.
/// </summary>
/// <param name="decision">The decision number</param>
/// <param name="state">
/// The final simulator state containing the unique
/// alternative identified by full-context prediction
/// </param>
/// <param name="input">The input token stream</param>
/// <param name="startIndex">The start index for the current prediction</param>
/// <param name="stopIndex">
/// The index at which the context sensitivity was
/// identified during full-context prediction
/// </param>
public ContextSensitivityInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex)
: base(decision, state, input, startIndex, stopIndex, true)
{
}
}
}

@ -0,0 +1,79 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// This is the base class for gathering detailed information about prediction
/// events which occur during parsing.
/// </summary>
/// <remarks>
/// This is the base class for gathering detailed information about prediction
/// events which occur during parsing.
/// </remarks>
/// <since>4.3</since>
internal class DecisionEventInfo
{
/// <summary>The invoked decision number which this event is related to.</summary>
/// <remarks>The invoked decision number which this event is related to.</remarks>
/// <seealso cref="ATN.decisionToState"/>
public readonly int decision;
/// <summary>
/// The simulator state containing additional information relevant to the
/// prediction state when the current event occurred, or
/// <see langword="null"/>
/// if no
/// additional information is relevant or available.
/// </summary>
[Nullable]
public readonly SimulatorState state;
/// <summary>The input token stream which is being parsed.</summary>
/// <remarks>The input token stream which is being parsed.</remarks>
[NotNull]
public readonly ITokenStream input;
/// <summary>
/// The token index in the input stream at which the current prediction was
/// originally invoked.
/// </summary>
/// <remarks>
/// The token index in the input stream at which the current prediction was
/// originally invoked.
/// </remarks>
public readonly int startIndex;
/// <summary>The token index in the input stream at which the current event occurred.</summary>
/// <remarks>The token index in the input stream at which the current event occurred.</remarks>
public readonly int stopIndex;
/// <summary>
/// <see langword="true"/>
/// if the current event occurred during LL prediction;
/// otherwise,
/// <see langword="false"/>
/// if the input occurred during SLL prediction.
/// </summary>
public readonly bool fullCtx;
public DecisionEventInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex, bool fullCtx)
{
this.decision = decision;
this.fullCtx = fullCtx;
this.stopIndex = stopIndex;
this.input = input;
this.startIndex = startIndex;
this.state = state;
}
}
}

@ -0,0 +1,251 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections.Generic;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>This class contains profiling information gathered for a particular decision.</summary>
/// <remarks>
/// This class contains profiling information gathered for a particular decision.
/// <p>
/// Parsing performance in ANTLR 4 is heavily influenced by both static factors
/// (e.g. the form of the rules in the grammar) and dynamic factors (e.g. the
/// choice of input and the state of the DFA cache at the time profiling
/// operations are started). For best results, gather and use aggregate
/// statistics from a large sample of inputs representing the inputs expected in
/// production before using the results to make changes in the grammar.</p>
/// </remarks>
/// <since>4.3</since>
internal class DecisionInfo
{
/**
* The decision number, which is an index into {@link ATN#decisionToState}.
*/
public readonly int decision;
/**
* The total number of times {@link ParserATNSimulator#adaptivePredict} was
* invoked for this decision.
*/
public long invocations;
/**
* The total time spent in {@link ParserATNSimulator#adaptivePredict} for
* this decision, in nanoseconds.
*
* <p>
* The value of this field contains the sum of differential results obtained
* by {@link System#nanoTime()}, and is not adjusted to compensate for JIT
* and/or garbage collection overhead. For best accuracy, use a modern JVM
* implementation that provides precise results from
* {@link System#nanoTime()}, and perform profiling in a separate process
* which is warmed up by parsing the input prior to profiling. If desired,
* call {@link ATNSimulator#clearDFA} to reset the DFA cache to its initial
* state before starting the profiling measurement pass.</p>
*/
public long timeInPrediction;
/**
* The sum of the lookahead required for SLL prediction for this decision.
* Note that SLL prediction is used before LL prediction for performance
* reasons even when {@link PredictionMode#LL} or
* {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
*/
public long SLL_TotalLook;
/**
* Gets the minimum lookahead required for any single SLL prediction to
* complete for this decision, by reaching a unique prediction, reaching an
* SLL conflict state, or encountering a syntax error.
*/
public long SLL_MinLook;
/**
* Gets the maximum lookahead required for any single SLL prediction to
* complete for this decision, by reaching a unique prediction, reaching an
* SLL conflict state, or encountering a syntax error.
*/
public long SLL_MaxLook;
/**
* Gets the {@link LookaheadEventInfo} associated with the event where the
* {@link #SLL_MaxLook} value was set.
*/
public LookaheadEventInfo SLL_MaxLookEvent;
/**
* The sum of the lookahead required for LL prediction for this decision.
* Note that LL prediction is only used when SLL prediction reaches a
* conflict state.
*/
public long LL_TotalLook;
/**
* Gets the minimum lookahead required for any single LL prediction to
* complete for this decision. An LL prediction completes when the algorithm
* reaches a unique prediction, a conflict state (for
* {@link PredictionMode#LL}), an ambiguity state (for
* {@link PredictionMode#LL_EXACT_AMBIG_DETECTION}), or a syntax error.
*/
public long LL_MinLook;
/**
* Gets the maximum lookahead required for any single LL prediction to
* complete for this decision. An LL prediction completes when the algorithm
* reaches a unique prediction, a conflict state (for
* {@link PredictionMode#LL}), an ambiguity state (for
* {@link PredictionMode#LL_EXACT_AMBIG_DETECTION}), or a syntax error.
*/
public long LL_MaxLook;
/**
* Gets the {@link LookaheadEventInfo} associated with the event where the
* {@link #LL_MaxLook} value was set.
*/
public LookaheadEventInfo LL_MaxLookEvent;
/**
* A collection of {@link ContextSensitivityInfo} instances describing the
* context sensitivities encountered during LL prediction for this decision.
*
* @see ContextSensitivityInfo
*/
public readonly List<ContextSensitivityInfo> contextSensitivities = new List<ContextSensitivityInfo>();
/**
* A collection of {@link ErrorInfo} instances describing the parse errors
* identified during calls to {@link ParserATNSimulator#adaptivePredict} for
* this decision.
*
* @see ErrorInfo
*/
public readonly List<ErrorInfo> errors = new List<ErrorInfo>();
/**
* A collection of {@link AmbiguityInfo} instances describing the
* ambiguities encountered during LL prediction for this decision.
*
* @see AmbiguityInfo
*/
public readonly List<AmbiguityInfo> ambiguities = new List<AmbiguityInfo>();
/**
* A collection of {@link PredicateEvalInfo} instances describing the
* results of evaluating individual predicates during prediction for this
* decision.
*
* @see PredicateEvalInfo
*/
public readonly List<PredicateEvalInfo> predicateEvals = new List<PredicateEvalInfo>();
/**
* The total number of ATN transitions required during SLL prediction for
* this decision. An ATN transition is determined by the number of times the
* DFA does not contain an edge that is required for prediction, resulting
* in on-the-fly computation of that edge.
*
* <p>
* If DFA caching of SLL transitions is employed by the implementation, ATN
* computation may cache the computed edge for efficient lookup during
* future parsing of this decision. Otherwise, the SLL parsing algorithm
* will use ATN transitions exclusively.</p>
*
* @see #SLL_ATNTransitions
* @see ParserATNSimulator#computeTargetState
* @see LexerATNSimulator#computeTargetState
*/
public long SLL_ATNTransitions;
/**
* The total number of DFA transitions required during SLL prediction for
* this decision.
*
* <p>If the ATN simulator implementation does not use DFA caching for SLL
* transitions, this value will be 0.</p>
*
* @see ParserATNSimulator#getExistingTargetState
* @see LexerATNSimulator#getExistingTargetState
*/
public long SLL_DFATransitions;
/**
* Gets the total number of times SLL prediction completed in a conflict
* state, resulting in fallback to LL prediction.
*
* <p>Note that this value is not related to whether or not
* {@link PredictionMode#SLL} may be used successfully with a particular
* grammar. If the ambiguity resolution algorithm applied to the SLL
* conflicts for this decision produce the same result as LL prediction for
* this decision, {@link PredictionMode#SLL} would produce the same overall
* parsing result as {@link PredictionMode#LL}.</p>
*/
public long LL_Fallback;
/**
* The total number of ATN transitions required during LL prediction for
* this decision. An ATN transition is determined by the number of times the
* DFA does not contain an edge that is required for prediction, resulting
* in on-the-fly computation of that edge.
*
* <p>
* If DFA caching of LL transitions is employed by the implementation, ATN
* computation may cache the computed edge for efficient lookup during
* future parsing of this decision. Otherwise, the LL parsing algorithm will
* use ATN transitions exclusively.</p>
*
* @see #LL_DFATransitions
* @see ParserATNSimulator#computeTargetState
* @see LexerATNSimulator#computeTargetState
*/
public long LL_ATNTransitions;
/**
* The total number of DFA transitions required during LL prediction for
* this decision.
*
* <p>If the ATN simulator implementation does not use DFA caching for LL
* transitions, this value will be 0.</p>
*
* @see ParserATNSimulator#getExistingTargetState
* @see LexerATNSimulator#getExistingTargetState
*/
public long LL_DFATransitions;
/**
* Constructs a new instance of the {@link DecisionInfo} class to contain
* statistics for a particular decision.
*
* @param decision The decision number
*/
public DecisionInfo(int decision)
{
this.decision = decision;
}
public override string ToString()
{
return "{" +
"decision=" + decision +
", contextSensitivities=" + contextSensitivities.Count +
", errors=" + errors.Count +
", ambiguities=" + ambiguities.Count +
", SLL_lookahead=" + SLL_TotalLook +
", SLL_ATNTransitions=" + SLL_ATNTransitions +
", SLL_DFATransitions=" + SLL_DFATransitions +
", LL_Fallback=" + LL_Fallback +
", LL_lookahead=" + LL_TotalLook +
", LL_ATNTransitions=" + LL_ATNTransitions +
'}';
}
}
}
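
The counters above only become meaningful once they are aggregated over a profiling run. As a hedged sketch (not part of this change, and assuming a DecisionInfo[] array has already been obtained from whatever profiling hook the simulator exposes), a per-decision summary might look like this:

using System;
using Antlr4.Runtime.Atn;

internal static class DecisionInfoReport
{
    internal static void Print(DecisionInfo[] decisions)
    {
        foreach (DecisionInfo d in decisions)
        {
            if (d.invocations == 0)
            {
                continue; // decision never exercised by the sampled input
            }

            // Average lookahead across the SLL and LL phases for this decision.
            double avgLook = (double)(d.SLL_TotalLook + d.LL_TotalLook) / d.invocations;
            Console.WriteLine(
                "decision {0}: {1} invocations, {2} LL fallbacks, avg lookahead {3:F2}",
                d.decision, d.invocations, d.LL_Fallback, avgLook);
        }
    }
}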

@ -0,0 +1,19 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal abstract class DecisionState : ATNState
{
public int decision = -1;
public bool nonGreedy;
}
}

@ -0,0 +1,73 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
#pragma warning disable 0659 // 'class' overrides Object.Equals(object o) but does not override Object.GetHashCode()
internal sealed class EmptyPredictionContext : SingletonPredictionContext
{
internal EmptyPredictionContext()
: base(null, EMPTY_RETURN_STATE)
{
}
public override PredictionContext GetParent(int index)
{
return null;
}
public override int GetReturnState(int index)
{
return returnState;
}
public override int Size
{
get
{
return 1;
}
}
public override bool IsEmpty
{
get
{
return true;
}
}
public override bool Equals(object o)
{
return this == o;
}
public override string ToString()
{
return "$";
}
public override string[] ToStrings(IRecognizer recognizer, int currentState)
{
return new string[] { "[]" };
}
public override string[] ToStrings(IRecognizer recognizer, PredictionContext stop, int currentState)
{
return new string[] { "[]" };
}
}
}

@ -0,0 +1,71 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal sealed class EpsilonTransition : Transition
{
private readonly int outermostPrecedenceReturn;
public EpsilonTransition(ATNState target)
: this(target, -1)
{
}
public EpsilonTransition(ATNState target, int outermostPrecedenceReturn)
: base(target)
{
this.outermostPrecedenceReturn = outermostPrecedenceReturn;
}
/// <returns>
/// the rule index of a precedence rule for which this transition is
/// returning from, where the precedence value is 0; otherwise, -1.
/// </returns>
/// <seealso cref="ATNConfig.IsPrecedenceFilterSuppressed"/>
/// <seealso cref="ParserATNSimulator.ApplyPrecedenceFilter(ATNConfigSet)"/>
/// <since>4.4.1</since>
public int OutermostPrecedenceReturn
{
get
{
return outermostPrecedenceReturn;
}
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.EPSILON;
}
}
public override bool IsEpsilon
{
get
{
return true;
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return false;
}
[return: NotNull]
public override string ToString()
{
return "epsilon";
}
}
}

@ -0,0 +1,50 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// This class represents profiling event information for a syntax error
/// identified during prediction.
/// </summary>
/// <remarks>
/// This class represents profiling event information for a syntax error
/// identified during prediction. Syntax errors occur when the prediction
/// algorithm is unable to identify an alternative which would lead to a
/// successful parse.
/// </remarks>
/// <seealso cref="Parser.NotifyErrorListeners(IToken, string, RecognitionException)"/>
/// <seealso cref="IAntlrErrorListener{TSymbol}.SyntaxError"/>
/// <since>4.3</since>
internal class ErrorInfo : DecisionEventInfo
{
/// <summary>
/// Constructs a new instance of the
/// <see cref="ErrorInfo"/>
/// class with the
/// specified detailed syntax error information.
/// </summary>
/// <param name="decision">The decision number</param>
/// <param name="state">
/// The final simulator state reached during prediction
/// prior to reaching the
/// <see cref="ATNSimulator.ERROR"/>
/// state
/// </param>
/// <param name="input">The input token stream</param>
/// <param name="startIndex">The start index for the current prediction</param>
/// <param name="stopIndex">The index at which the syntax error was identified</param>
public ErrorInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex)
: base(decision, state, input, startIndex, stopIndex, state.useContext)
{
}
}
}

@ -0,0 +1,81 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Represents a single action which can be executed following the successful
/// match of a lexer rule.
/// </summary>
/// <remarks>
/// Represents a single action which can be executed following the successful
/// match of a lexer rule. Lexer actions are used for both embedded action syntax
/// and ANTLR 4's new lexer command syntax.
/// </remarks>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal interface ILexerAction
{
/// <summary>Gets the serialization type of the lexer action.</summary>
/// <remarks>Gets the serialization type of the lexer action.</remarks>
/// <returns>The serialization type of the lexer action.</returns>
[NotNull]
LexerActionType ActionType
{
get;
}
/// <summary>Gets whether the lexer action is position-dependent.</summary>
/// <remarks>
/// Gets whether the lexer action is position-dependent. Position-dependent
/// actions may have different semantics depending on the
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// index at the time the action is executed.
/// <p>Many lexer commands, including
/// <c>type</c>
/// ,
/// <c>skip</c>
/// , and
/// <c>more</c>
/// , do not check the input index during their execution.
/// Actions like this are position-independent, and may be stored more
/// efficiently as part of the
/// <see cref="LexerATNConfig.lexerActionExecutor"/>
/// .</p>
/// </remarks>
/// <returns>
///
/// <see langword="true"/>
/// if the lexer action semantics can be affected by the
/// position of the input
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// at the time it is executed;
/// otherwise,
/// <see langword="false"/>
/// .
/// </returns>
bool IsPositionDependent
{
get;
}
/// <summary>
/// Execute the lexer action in the context of the specified
/// <see cref="Antlr4.Runtime.Lexer"/>
/// .
/// <p>For position-dependent actions, the input stream must already be
/// positioned correctly prior to calling this method.</p>
/// </summary>
/// <param name="lexer">The lexer instance.</param>
void Execute(Lexer lexer);
}
}
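
As a hedged illustration (not part of this change), a position-independent implementation of this interface is small. The sketch assumes LexerActionType.Skip is a member of the serialization enum and that Lexer exposes a Skip() helper, as the built-in skip command does; because it never reads the char stream index, IsPositionDependent returns false.

using Antlr4.Runtime;
using Antlr4.Runtime.Atn;

internal sealed class SkipLikeAction : ILexerAction
{
    public LexerActionType ActionType
    {
        get { return LexerActionType.Skip; } // assumed enum member
    }

    public bool IsPositionDependent
    {
        get { return false; } // no char-stream lookups, so it can merge without offset bookkeeping
    }

    public void Execute(Lexer lexer)
    {
        lexer.Skip(); // assumed Lexer helper; marks the pending token as skipped
    }
}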

@ -0,0 +1,378 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections.Generic;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal class LL1Analyzer
{
/// <summary>
/// Special value added to the lookahead sets to indicate that we hit
/// a predicate during analysis if
/// <c>seeThruPreds==false</c>
/// .
/// </summary>
public const int HitPred = TokenConstants.InvalidType;
[NotNull]
public readonly ATN atn;
public LL1Analyzer(ATN atn)
{
this.atn = atn;
}
/// <summary>
/// Calculates the SLL(1) expected lookahead set for each outgoing transition
/// of an
/// <see cref="ATNState"/>
/// . The returned array has one element for each
/// outgoing transition in
/// <paramref name="s"/>
/// . If the closure from transition
/// <em>i</em> leads to a semantic predicate before matching a symbol, the
/// element at index <em>i</em> of the result will be
/// <see langword="null"/>
/// .
/// </summary>
/// <param name="s">the ATN state</param>
/// <returns>
/// the expected symbols for each outgoing transition of
/// <paramref name="s"/>
/// .
/// </returns>
[return: Nullable]
public virtual IntervalSet[] GetDecisionLookahead(ATNState s)
{
// System.out.println("LOOK("+s.stateNumber+")");
if (s == null)
{
return null;
}
IntervalSet[] look = new IntervalSet[s.NumberOfTransitions];
for (int alt = 0; alt < s.NumberOfTransitions; alt++)
{
look[alt] = new IntervalSet();
HashSet<ATNConfig> lookBusy = new HashSet<ATNConfig>();
bool seeThruPreds = false;
// fail to get lookahead upon pred
Look(s.Transition(alt).target, null, PredictionContext.EMPTY, look[alt], lookBusy, new BitSet(), seeThruPreds, false);
// Wipe out lookahead for this alternative if we found nothing
// or we had a predicate when we !seeThruPreds
if (look[alt].Count == 0 || look[alt].Contains(HitPred))
{
look[alt] = null;
}
}
return look;
}
/// <summary>
/// Compute set of tokens that can follow
/// <paramref name="s"/>
/// in the ATN in the
/// specified
/// <paramref name="ctx"/>
/// .
/// <p>If
/// <paramref name="ctx"/>
/// is
/// <see langword="null"/>
/// and the end of the rule containing
/// <paramref name="s"/>
/// is reached,
/// <see cref="TokenConstants.EPSILON"/>
/// is added to the result set.
/// If
/// <paramref name="ctx"/>
/// is not
/// <see langword="null"/>
/// and the end of the outermost rule is
/// reached,
/// <see cref="TokenConstants.EOF"/>
/// is added to the result set.</p>
/// </summary>
/// <param name="s">the ATN state</param>
/// <param name="ctx">
/// the complete parser context, or
/// <see langword="null"/>
/// if the context
/// should be ignored
/// </param>
/// <returns>
/// The set of tokens that can follow
/// <paramref name="s"/>
/// in the ATN in the
/// specified
/// <paramref name="ctx"/>
/// .
/// </returns>
[return: NotNull]
public virtual IntervalSet Look(ATNState s, RuleContext ctx)
{
return Look(s, null, ctx);
}
/// <summary>
/// Compute set of tokens that can follow
/// <paramref name="s"/>
/// in the ATN in the
/// specified
/// <paramref name="ctx"/>
/// .
/// <p>If
/// <paramref name="ctx"/>
/// is
/// <see langword="null"/>
/// and the end of the rule containing
/// <paramref name="s"/>
/// is reached,
/// <see cref="TokenConstants.EPSILON"/>
/// is added to the result set.
/// If
/// <paramref name="ctx"/>
/// is not
/// <c>PredictionContext#EMPTY_LOCAL</c>
/// and the end of the outermost rule is
/// reached,
/// <see cref="TokenConstants.EOF"/>
/// is added to the result set.</p>
/// </summary>
/// <param name="s">the ATN state</param>
/// <param name="stopState">
/// the ATN state to stop at. This can be a
/// <see cref="BlockEndState"/>
/// to detect epsilon paths through a closure.
/// </param>
/// <param name="ctx">
/// the complete parser context, or
/// <see langword="null"/>
/// if the context
/// should be ignored
/// </param>
/// <returns>
/// The set of tokens that can follow
/// <paramref name="s"/>
/// in the ATN in the
/// specified
/// <paramref name="ctx"/>
/// .
/// </returns>
[return: NotNull]
public virtual IntervalSet Look(ATNState s, ATNState stopState, RuleContext ctx)
{
IntervalSet r = new IntervalSet();
bool seeThruPreds = true;
PredictionContext lookContext = ctx != null ? PredictionContext.FromRuleContext(s.atn, ctx) : null;
Look(s, stopState, lookContext, r, new HashSet<ATNConfig>(), new BitSet(), seeThruPreds, true);
return r;
}
/// <summary>
/// Compute set of tokens that can follow
/// <paramref name="s"/>
/// in the ATN in the
/// specified
/// <paramref name="ctx"/>
/// .
/// <p/>
/// If
/// <paramref name="ctx"/>
/// is
/// <see cref="PredictionContext.EMPTY"/>
/// and
/// <paramref name="stopState"/>
/// or the end of the rule containing
/// <paramref name="s"/>
/// is reached,
/// <see cref="TokenConstants.EPSILON"/>
/// is added to the result set. If
/// <paramref name="ctx"/>
/// is not
/// <see cref="PredictionContext.EMPTY"/>
/// and
/// <paramref name="addEOF"/>
/// is
/// <see langword="true"/>
/// and
/// <paramref name="stopState"/>
/// or the end of the outermost rule is reached,
/// <see cref="TokenConstants.EOF"/>
/// is added to the result set.
/// </summary>
/// <param name="s">the ATN state.</param>
/// <param name="stopState">
/// the ATN state to stop at. This can be a
/// <see cref="BlockEndState"/>
/// to detect epsilon paths through a closure.
/// </param>
/// <param name="ctx">
/// The outer context, or
/// <see cref="PredictionContext.EMPTY"/>
/// if
/// the outer context should not be used.
/// </param>
/// <param name="look">The result lookahead set.</param>
/// <param name="lookBusy">
/// A set used for preventing epsilon closures in the ATN
/// from causing a stack overflow. Outside code should pass
/// <c>new HashSet&lt;ATNConfig&gt;</c>
/// for this argument.
/// </param>
/// <param name="calledRuleStack">
/// A set used for preventing left recursion in the
/// ATN from causing a stack overflow. Outside code should pass
/// <c>new BitSet()</c>
/// for this argument.
/// </param>
/// <param name="seeThruPreds">
///
/// <see langword="true"/>
/// to treat semantic predicates as
/// implicitly
/// <see langword="true"/>
/// and "see through them", otherwise
/// <see langword="false"/>
/// to treat semantic predicates as opaque and add
/// <see cref="HitPred"/>
/// to the
/// result if one is encountered.
/// </param>
/// <param name="addEOF">
/// Add
/// <see cref="TokenConstants.EOF"/>
/// to the result if the end of the
/// outermost context is reached. This parameter has no effect if
/// <paramref name="ctx"/>
/// is
/// <see cref="PredictionContext.EMPTY"/>
/// .
/// </param>
protected internal virtual void Look(ATNState s, ATNState stopState, PredictionContext ctx, IntervalSet look, HashSet<ATNConfig> lookBusy, BitSet calledRuleStack, bool seeThruPreds, bool addEOF)
{
// System.out.println("_LOOK("+s.stateNumber+", ctx="+ctx);
ATNConfig c = new ATNConfig(s, 0, ctx);
if (!lookBusy.Add(c))
{
return;
}
if (s == stopState)
{
if (ctx == null)
{
look.Add(TokenConstants.EPSILON);
return;
}
else if (ctx.IsEmpty && addEOF) {
look.Add(TokenConstants.EOF);
return;
}
}
if (s is RuleStopState)
{
if (ctx == null)
{
look.Add(TokenConstants.EPSILON);
return;
}
else if (ctx.IsEmpty && addEOF)
{
look.Add(TokenConstants.EOF);
return;
}
if (ctx != PredictionContext.EMPTY)
{
for (int i = 0; i < ctx.Size; i++)
{
ATNState returnState = atn.states[ctx.GetReturnState(i)];
bool removed = calledRuleStack.Get(returnState.ruleIndex);
try
{
calledRuleStack.Clear(returnState.ruleIndex);
Look(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF);
}
finally
{
if (removed)
{
calledRuleStack.Set(returnState.ruleIndex);
}
}
}
return;
}
}
int n = s.NumberOfTransitions;
for (int i_1 = 0; i_1 < n; i_1++)
{
Transition t = s.Transition(i_1);
if (t is RuleTransition)
{
RuleTransition ruleTransition = (RuleTransition)t;
if (calledRuleStack.Get(ruleTransition.ruleIndex))
{
continue;
}
PredictionContext newContext = SingletonPredictionContext.Create(ctx, ruleTransition.followState.stateNumber);
try
{
calledRuleStack.Set(ruleTransition.target.ruleIndex);
Look(t.target, stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF);
}
finally
{
calledRuleStack.Clear(ruleTransition.target.ruleIndex);
}
}
else
{
if (t is AbstractPredicateTransition)
{
if (seeThruPreds)
{
Look(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF);
}
else
{
look.Add(HitPred);
}
}
else
{
if (t.IsEpsilon)
{
Look(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF);
}
else
{
if (t is WildcardTransition)
{
look.AddAll(IntervalSet.Of(TokenConstants.MinUserTokenType, atn.maxTokenType));
}
else
{
IntervalSet set = t.Label;
if (set != null)
{
if (t is NotSetTransition)
{
set = set.Complement(IntervalSet.Of(TokenConstants.MinUserTokenType, atn.maxTokenType));
}
look.AddAll(set);
}
}
}
}
}
}
}
}
}
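
For orientation (not part of this change), the public Look overloads above reduce to a one-liner in practice. The sketch below assumes the caller already holds the recognizer's ATN, a state of interest, and optionally the invoking RuleContext; how those are obtained is outside the sketch.

using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;

internal static class FollowSetExample
{
    internal static IntervalSet FollowOf(ATN atn, ATNState state, RuleContext callContext)
    {
        LL1Analyzer analyzer = new LL1Analyzer(atn);

        // With callContext == null the analyzer adds EPSILON when a rule end is reached;
        // with a real context it follows the return states instead, as documented above.
        return analyzer.Look(state, callContext);
    }
}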

@ -0,0 +1,123 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime.Misc;
namespace Antlr4.Runtime.Atn
{
internal class LexerATNConfig : ATNConfig
{
/**
* This is the backing field for {@link #getLexerActionExecutor}.
*/
private readonly LexerActionExecutor lexerActionExecutor;
private readonly bool passedThroughNonGreedyDecision;
public LexerATNConfig(ATNState state,
int alt,
PredictionContext context)
: base(state, alt, context/*, SemanticContext.NONE*/) // TODO
{
this.passedThroughNonGreedyDecision = false;
this.lexerActionExecutor = null;
}
public LexerATNConfig(ATNState state,
int alt,
PredictionContext context,
LexerActionExecutor lexerActionExecutor)
: base(state, alt, context, SemanticContext.NONE)
{
this.lexerActionExecutor = lexerActionExecutor;
this.passedThroughNonGreedyDecision = false;
}
public LexerATNConfig(LexerATNConfig c, ATNState state)
: base(c, state, c.context, c.semanticContext)
{
this.lexerActionExecutor = c.lexerActionExecutor;
this.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state);
}
public LexerATNConfig(LexerATNConfig c, ATNState state,
LexerActionExecutor lexerActionExecutor)
: base(c, state, c.context, c.semanticContext)
{
this.lexerActionExecutor = lexerActionExecutor;
this.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state);
}
public LexerATNConfig(LexerATNConfig c, ATNState state,
PredictionContext context)
: base(c, state, context, c.semanticContext)
{
this.lexerActionExecutor = c.lexerActionExecutor;
this.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state);
}
/**
* Gets the {@link LexerActionExecutor} capable of executing the embedded
* action(s) for the current configuration.
*/
public LexerActionExecutor getLexerActionExecutor()
{
return lexerActionExecutor;
}
public bool hasPassedThroughNonGreedyDecision()
{
return passedThroughNonGreedyDecision;
}
public override int GetHashCode()
{
int hashCode = MurmurHash.Initialize(7);
hashCode = MurmurHash.Update(hashCode, state.stateNumber);
hashCode = MurmurHash.Update(hashCode, alt);
hashCode = MurmurHash.Update(hashCode, context);
hashCode = MurmurHash.Update(hashCode, semanticContext);
hashCode = MurmurHash.Update(hashCode, passedThroughNonGreedyDecision ? 1 : 0);
hashCode = MurmurHash.Update(hashCode, lexerActionExecutor);
hashCode = MurmurHash.Finish(hashCode, 6);
return hashCode;
}
public override bool Equals(ATNConfig other)
{
if (this == other)
{
return true;
}
else if (!(other is LexerATNConfig))
{
return false;
}
LexerATNConfig lexerOther = (LexerATNConfig)other;
if (passedThroughNonGreedyDecision != lexerOther.passedThroughNonGreedyDecision)
{
return false;
}
if (!(lexerActionExecutor==null ? lexerOther.lexerActionExecutor==null : lexerActionExecutor.Equals(lexerOther.lexerActionExecutor)))
{
return false;
}
return base.Equals(other);
}
private static bool checkNonGreedyDecision(LexerATNConfig source, ATNState target)
{
return source.passedThroughNonGreedyDecision
|| target is DecisionState && ((DecisionState)target).nonGreedy;
}
}
}

@ -0,0 +1,842 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Misc;
namespace Antlr4.Runtime.Atn
{
/// <summary>"dup" of ParserInterpreter</summary>
internal class LexerATNSimulator : ATNSimulator
{
public readonly bool debug = false;
public readonly bool dfa_debug = false;
public static readonly int MIN_DFA_EDGE = 0;
public static readonly int MAX_DFA_EDGE = 127; // forces unicode to stay in ATN
protected readonly Lexer recog;
/** The current token's starting index into the character stream.
* Shared across DFA to ATN simulation in case the ATN fails and the
* DFA did not have a previous accept state. In this case, we use the
* ATN-generated exception object.
*/
protected int startIndex = -1;
/** line number 1..n within the input */
protected int thisLine = 1;
/** The index of the character relative to the beginning of the line 0..n-1 */
protected int charPositionInLine = 0;
public readonly DFA[] decisionToDFA;
protected int mode = Lexer.DEFAULT_MODE;
/** Used during DFA/ATN exec to record the most recent accept configuration info */
readonly SimState prevAccept = new SimState();
public static int match_calls = 0;
public LexerATNSimulator(ATN atn, DFA[] decisionToDFA,
PredictionContextCache sharedContextCache)
: this(null, atn, decisionToDFA, sharedContextCache)
{
}
public LexerATNSimulator(Lexer recog, ATN atn,
DFA[] decisionToDFA,
PredictionContextCache sharedContextCache)
: base(atn, sharedContextCache)
{
this.decisionToDFA = decisionToDFA;
this.recog = recog;
}
public void CopyState(LexerATNSimulator simulator)
{
this.charPositionInLine = simulator.charPositionInLine;
this.thisLine = simulator.thisLine;
this.mode = simulator.mode;
this.startIndex = simulator.startIndex;
}
public int Match(ICharStream input, int mode)
{
match_calls++;
this.mode = mode;
int mark = input.Mark();
try
{
this.startIndex = input.Index;
this.prevAccept.Reset();
DFA dfa = decisionToDFA[mode];
if (dfa.s0 == null)
{
return MatchATN(input);
}
else
{
return ExecATN(input, dfa.s0);
}
}
finally
{
input.Release(mark);
}
}
public override void Reset()
{
prevAccept.Reset();
startIndex = -1;
thisLine = 1;
charPositionInLine = 0;
mode = Lexer.DEFAULT_MODE;
}
public override void ClearDFA()
{
for (int d = 0; d < decisionToDFA.Length; d++)
{
decisionToDFA[d] = new DFA(atn.GetDecisionState(d), d);
}
}
protected int MatchATN(ICharStream input)
{
ATNState startState = atn.modeToStartState[mode];
if (debug)
{
ConsoleWriteLine("matchATN mode " + mode + " start: " + startState);
}
int old_mode = mode;
ATNConfigSet s0_closure = ComputeStartState(input, startState);
bool suppressEdge = s0_closure.hasSemanticContext;
s0_closure.hasSemanticContext = false;
DFAState next = AddDFAState(s0_closure);
if (!suppressEdge)
{
decisionToDFA[mode].s0 = next;
}
int predict = ExecATN(input, next);
if (debug)
{
ConsoleWriteLine("DFA after matchATN: " + decisionToDFA[old_mode].ToString());
}
return predict;
}
protected int ExecATN(ICharStream input, DFAState ds0)
{
//System.out.println("enter exec index "+input.index()+" from "+ds0.configs);
if (debug)
{
ConsoleWriteLine("start state closure=" + ds0.configSet);
}
if (ds0.isAcceptState)
{
// allow zero-length tokens
CaptureSimState(prevAccept, input, ds0);
}
int t = input.LA(1);
DFAState s = ds0; // s is current/from DFA state
while (true)
{ // while more work
if (debug)
{
ConsoleWriteLine("execATN loop starting closure: " + s.configSet);
}
// As we move src->trg, src->trg, we keep track of the previous trg to
// avoid looking up the DFA state again, which is expensive.
// If the previous target was already part of the DFA, we might
// be able to avoid doing a reach operation upon t. If s!=null,
// it means that semantic predicates didn't prevent us from
// creating a DFA state. Once we know s!=null, we check to see if
// the DFA state has an edge already for t. If so, we can just reuse
// its configuration set; there's no point in re-computing it.
// This is kind of like doing DFA simulation within the ATN
// simulation because DFA simulation is really just a way to avoid
// computing reach/closure sets. Technically, once we know that
// we have a previously added DFA state, we could jump over to
// the DFA simulator. But, that would mean popping back and forth
// a lot and making things more complicated algorithmically.
// This optimization makes a lot of sense for loops within DFA.
// A character will take us back to an existing DFA state
// that already has lots of edges out of it. e.g., .* in comments.
DFAState target = GetExistingTargetState(s, t);
if (target == null)
{
target = ComputeTargetState(input, s, t);
}
if (target == ERROR)
{
break;
}
// If this is a consumable input element, make sure to consume before
// capturing the accept state so the input index, line, and char
// position accurately reflect the state of the interpreter at the
// end of the token.
if (t != IntStreamConstants.EOF)
{
Consume(input);
}
if (target.isAcceptState)
{
CaptureSimState(prevAccept, input, target);
if (t == IntStreamConstants.EOF)
{
break;
}
}
t = input.LA(1);
s = target; // flip; current DFA target becomes new src/from state
}
return FailOrAccept(prevAccept, input, s.configSet, t);
}
/**
* Get an existing target state for an edge in the DFA. If the target state
* for the edge has not yet been computed or is otherwise not available,
* this method returns {@code null}.
*
* @param s The current DFA state
* @param t The next input symbol
* @return The existing target DFA state for the given input symbol
* {@code t}, or {@code null} if the target state for this edge is not
* already cached
*/
protected DFAState GetExistingTargetState(DFAState s, int t)
{
if (s.edges == null || t < MIN_DFA_EDGE || t > MAX_DFA_EDGE)
{
return null;
}
DFAState target = s.edges[t - MIN_DFA_EDGE];
if (debug && target != null)
{
ConsoleWriteLine("reuse state " + s.stateNumber + " edge to " + target.stateNumber);
}
return target;
}
/**
* Compute a target state for an edge in the DFA, and attempt to add the
* computed state and corresponding edge to the DFA.
*
* @param input The input stream
* @param s The current DFA state
* @param t The next input symbol
*
* @return The computed target DFA state for the given input symbol
* {@code t}. If {@code t} does not lead to a valid DFA state, this method
* returns {@link #ERROR}.
*/
protected DFAState ComputeTargetState(ICharStream input, DFAState s, int t)
{
ATNConfigSet reach = new OrderedATNConfigSet();
// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
GetReachableConfigSet(input, s.configSet, reach, t);
if (reach.Empty)
{ // we got nowhere on t from s
if (!reach.hasSemanticContext)
{
// we got nowhere on t, don't throw out this knowledge; it'd
// cause a failover from DFA later.
AddDFAEdge(s, t, ERROR);
}
// stop when we can't match any more char
return ERROR;
}
// Add an edge from s to target DFA found/created for reach
return AddDFAEdge(s, t, reach);
}
protected int FailOrAccept(SimState prevAccept, ICharStream input,
ATNConfigSet reach, int t)
{
if (prevAccept.dfaState != null)
{
LexerActionExecutor lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor;
Accept(input, lexerActionExecutor, startIndex,
prevAccept.index, prevAccept.line, prevAccept.charPos);
return prevAccept.dfaState.prediction;
}
else {
// if no accept and EOF is first char, return EOF
if (t == IntStreamConstants.EOF && input.Index == startIndex)
{
return TokenConstants.EOF;
}
throw new LexerNoViableAltException(recog, input, startIndex, reach);
}
}
/** Given a starting configuration set, figure out all ATN configurations
* we can reach upon input {@code t}. Parameter {@code reach} is a return
* parameter.
*/
protected void GetReachableConfigSet(ICharStream input, ATNConfigSet closure, ATNConfigSet reach, int t)
{
// this is used to skip processing for configs which have a lower priority
// than a config that already reached an accept state for the same rule
int skipAlt = ATN.INVALID_ALT_NUMBER;
foreach (ATNConfig c in closure.configs)
{
bool currentAltReachedAcceptState = c.alt == skipAlt;
if (currentAltReachedAcceptState && ((LexerATNConfig)c).hasPassedThroughNonGreedyDecision())
{
continue;
}
if (debug)
{
ConsoleWriteLine("testing " + GetTokenName(t) + " at " + c.ToString(recog, true));
}
int n = c.state.NumberOfTransitions;
for (int ti = 0; ti < n; ti++)
{ // for each transition
Transition trans = c.state.Transition(ti);
ATNState target = GetReachableTarget(trans, t);
if (target != null)
{
LexerActionExecutor lexerActionExecutor = ((LexerATNConfig)c).getLexerActionExecutor();
if (lexerActionExecutor != null)
{
lexerActionExecutor = lexerActionExecutor.FixOffsetBeforeMatch(input.Index - startIndex);
}
bool treatEofAsEpsilon = t == IntStreamConstants.EOF;
if (Closure(input, new LexerATNConfig((LexerATNConfig)c, target, lexerActionExecutor), reach, currentAltReachedAcceptState, true, treatEofAsEpsilon))
{
// any remaining configs for this alt have a lower priority than
// the one that just reached an accept state.
skipAlt = c.alt;
break;
}
}
}
}
}
protected void Accept(ICharStream input, LexerActionExecutor lexerActionExecutor,
int startIndex, int index, int line, int charPos)
{
if (debug)
{
ConsoleWriteLine("ACTION " + lexerActionExecutor);
}
// seek to after last char in token
input.Seek(index);
this.thisLine = line;
this.charPositionInLine = charPos;
if (lexerActionExecutor != null && recog != null)
{
lexerActionExecutor.Execute(recog, input, startIndex);
}
}
protected ATNState GetReachableTarget(Transition trans, int t)
{
if (trans.Matches(t, Lexer.MinCharValue, Lexer.MaxCharValue))
{
return trans.target;
}
return null;
}
protected ATNConfigSet ComputeStartState(ICharStream input,
ATNState p)
{
PredictionContext initialContext = PredictionContext.EMPTY;
ATNConfigSet configs = new OrderedATNConfigSet();
for (int i = 0; i < p.NumberOfTransitions; i++)
{
ATNState target = p.Transition(i).target;
LexerATNConfig c = new LexerATNConfig(target, i + 1, initialContext);
Closure(input, c, configs, false, false, false);
}
return configs;
}
/**
* Since the alternatives within any lexer decision are ordered by
* preference, this method stops pursuing the closure as soon as an accept
* state is reached. After the first accept state is reached by depth-first
* search from {@code config}, all other (potentially reachable) states for
* this rule would have a lower priority.
*
* @return {@code true} if an accept state is reached, otherwise
* {@code false}.
*/
protected bool Closure(ICharStream input, LexerATNConfig config, ATNConfigSet configs, bool currentAltReachedAcceptState, bool speculative, bool treatEofAsEpsilon)
{
if (debug)
{
ConsoleWriteLine("closure(" + config.ToString(recog, true) + ")");
}
if (config.state is RuleStopState)
{
if (debug)
{
if (recog != null)
{
ConsoleWriteLine("closure at " + recog.RuleNames[config.state.ruleIndex] + " rule stop " + config);
}
else {
ConsoleWriteLine("closure at rule stop " + config);
}
}
if (config.context == null || config.context.HasEmptyPath)
{
if (config.context == null || config.context.IsEmpty)
{
configs.Add(config);
return true;
}
else {
configs.Add(new LexerATNConfig(config, config.state, PredictionContext.EMPTY));
currentAltReachedAcceptState = true;
}
}
if (config.context != null && !config.context.IsEmpty)
{
for (int i = 0; i < config.context.Size; i++)
{
if (config.context.GetReturnState(i) != PredictionContext.EMPTY_RETURN_STATE)
{
PredictionContext newContext = config.context.GetParent(i); // "pop" return state
ATNState returnState = atn.states[config.context.GetReturnState(i)];
LexerATNConfig c = new LexerATNConfig(config, returnState, newContext);
currentAltReachedAcceptState = Closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon);
}
}
}
return currentAltReachedAcceptState;
}
// optimization
if (!config.state.OnlyHasEpsilonTransitions)
{
if (!currentAltReachedAcceptState || !config.hasPassedThroughNonGreedyDecision())
{
configs.Add(config);
}
}
ATNState p = config.state;
for (int i = 0; i < p.NumberOfTransitions; i++)
{
Transition t = p.Transition(i);
LexerATNConfig c = GetEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon);
if (c != null)
{
currentAltReachedAcceptState = Closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon);
}
}
return currentAltReachedAcceptState;
}
// side-effect: can alter configs.hasSemanticContext
protected LexerATNConfig GetEpsilonTarget(ICharStream input,
LexerATNConfig config,
Transition t,
ATNConfigSet configs,
bool speculative,
bool treatEofAsEpsilon)
{
LexerATNConfig c = null;
switch (t.TransitionType)
{
case TransitionType.RULE:
RuleTransition ruleTransition = (RuleTransition)t;
PredictionContext newContext = new SingletonPredictionContext(config.context, ruleTransition.followState.stateNumber);
c = new LexerATNConfig(config, t.target, newContext);
break;
case TransitionType.PRECEDENCE:
throw new Exception("Precedence predicates are not supported in lexers.");
case TransitionType.PREDICATE:
/* Track traversing semantic predicates. If we traverse,
we cannot add a DFA state for this "reach" computation
because the DFA would not test the predicate again in the
future. Rather than creating collections of semantic predicates
like v3 and testing them on prediction, v4 will test them on the
fly all the time using the ATN not the DFA. This is slower but
semantically it's not used that often. One of the key elements to
this predicate mechanism is not adding DFA states that see
predicates immediately afterwards in the ATN. For example,
a : ID {p1}? | ID {p2}? ;
should create the start state for rule 'a' (to save start state
competition), but should not create target of ID state. The
collection of ATN states the following ID references includes
states reached by traversing predicates. Since this is when we
test them, we cannot cache the DFA state target of ID.
*/
PredicateTransition pt = (PredicateTransition)t;
if (debug)
{
ConsoleWriteLine("EVAL rule " + pt.ruleIndex + ":" + pt.predIndex);
}
configs.hasSemanticContext = true;
if (EvaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative))
{
c = new LexerATNConfig(config, t.target);
}
break;
case TransitionType.ACTION:
if (config.context == null || config.context.HasEmptyPath)
{
// execute actions anywhere in the start rule for a token.
//
// TODO: if the entry rule is invoked recursively, some
// actions may be executed during the recursive call. The
// problem can appear when hasEmptyPath() is true but
// isEmpty() is false. In this case, the config needs to be
// split into two contexts - one with just the empty path
// and another with everything but the empty path.
// Unfortunately, the current algorithm does not allow
// getEpsilonTarget to return two configurations, so
// additional modifications are needed before we can support
// the split operation.
LexerActionExecutor lexerActionExecutor = LexerActionExecutor.Append(config.getLexerActionExecutor(), atn.lexerActions[((ActionTransition)t).actionIndex]);
c = new LexerATNConfig(config, t.target, lexerActionExecutor);
break;
}
else {
// ignore actions in referenced rules
c = new LexerATNConfig(config, t.target);
break;
}
case TransitionType.EPSILON:
c = new LexerATNConfig(config, t.target);
break;
case TransitionType.ATOM:
case TransitionType.RANGE:
case TransitionType.SET:
if (treatEofAsEpsilon)
{
if (t.Matches(IntStreamConstants.EOF, Lexer.MinCharValue, Lexer.MaxCharValue))
{
c = new LexerATNConfig(config, t.target);
break;
}
}
break;
}
return c;
}
/**
* Evaluate a predicate specified in the lexer.
*
* <p>If {@code speculative} is {@code true}, this method was called before
* {@link #consume} for the matched character. This method should call
* {@link #consume} before evaluating the predicate to ensure position
* sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine},
* and {@link Lexer#getCharPositionInLine}, properly reflect the current
* lexer state. This method should restore {@code input} and the simulator
* to the original state before returning (i.e. undo the actions made by the
* call to {@link #consume}).</p>
*
* @param input The input stream.
* @param ruleIndex The rule containing the predicate.
* @param predIndex The index of the predicate within the rule.
* @param speculative {@code true} if the current index in {@code input} is
* one character before the predicate's location.
*
* @return {@code true} if the specified predicate evaluates to
* {@code true}.
*/
protected bool EvaluatePredicate(ICharStream input, int ruleIndex, int predIndex, bool speculative)
{
// assume true if no recognizer was provided
if (recog == null)
{
return true;
}
if (!speculative)
{
return recog.Sempred(null, ruleIndex, predIndex);
}
int savedCharPositionInLine = charPositionInLine;
int savedLine = thisLine;
int index = input.Index;
int marker = input.Mark();
try
{
Consume(input);
return recog.Sempred(null, ruleIndex, predIndex);
}
finally
{
charPositionInLine = savedCharPositionInLine;
thisLine = savedLine;
input.Seek(index);
input.Release(marker);
}
}
protected void CaptureSimState(SimState settings,
ICharStream input,
DFAState dfaState)
{
settings.index = input.Index;
settings.line = thisLine;
settings.charPos = charPositionInLine;
settings.dfaState = dfaState;
}
protected DFAState AddDFAEdge(DFAState from,
int t,
ATNConfigSet q)
{
/* leading to this call, ATNConfigSet.hasSemanticContext is used as a
* marker indicating dynamic predicate evaluation makes this edge
* dependent on the specific input sequence, so the static edge in the
* DFA should be omitted. The target DFAState is still created since
* execATN has the ability to resynchronize with the DFA state cache
* following the predicate evaluation step.
*
* TJP notes: next time through the DFA, we see a pred again and eval.
* If that gets us to a previously created (but dangling) DFA
* state, we can continue in pure DFA mode from there.
*/
bool suppressEdge = q.hasSemanticContext;
q.hasSemanticContext = false;
DFAState to = AddDFAState(q);
if (suppressEdge)
{
return to;
}
AddDFAEdge(from, t, to);
return to;
}
protected void AddDFAEdge(DFAState p, int t, DFAState q)
{
if (t < MIN_DFA_EDGE || t > MAX_DFA_EDGE)
{
// Only track edges within the DFA bounds
return;
}
if (debug)
{
ConsoleWriteLine("EDGE " + p + " -> " + q + " upon " + ((char)t));
}
lock (p)
{
if (p.edges == null)
{
// make room for tokens 1..n and -1 masquerading as index 0
p.edges = new DFAState[MAX_DFA_EDGE - MIN_DFA_EDGE + 1];
}
p.edges[t - MIN_DFA_EDGE] = q; // connect
}
}
/** Add a new DFA state if there isn't one with this set of
configurations already. This method also detects the first
configuration containing an ATN rule stop state. Later, when
traversing the DFA, we will know which rule to accept.
*/
protected DFAState AddDFAState(ATNConfigSet configSet)
{
/* the lexer evaluates predicates on-the-fly; by this point configs
* should not contain any configurations with unevaluated predicates.
*/
DFAState proposed = new DFAState(configSet);
ATNConfig firstConfigWithRuleStopState = null;
foreach (ATNConfig c in configSet.configs)
{
if (c.state is RuleStopState)
{
firstConfigWithRuleStopState = c;
break;
}
}
if (firstConfigWithRuleStopState != null)
{
proposed.isAcceptState = true;
proposed.lexerActionExecutor = ((LexerATNConfig)firstConfigWithRuleStopState).getLexerActionExecutor();
proposed.prediction = atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex];
}
DFA dfa = decisionToDFA[mode];
lock (dfa.states)
{
DFAState existing;
if(dfa.states.TryGetValue(proposed, out existing))
return existing;
DFAState newState = proposed;
newState.stateNumber = dfa.states.Count;
configSet.IsReadOnly = true;
newState.configSet = configSet;
dfa.states[newState] = newState;
return newState;
}
}
public DFA GetDFA(int mode)
{
return decisionToDFA[mode];
}
/** Get the text matched so far for the current token.
*/
public String GetText(ICharStream input)
{
// index is first lookahead char, don't include.
return input.GetText(Interval.Of(startIndex, input.Index - 1));
}
public int Line
{
get
{
return thisLine;
}
set
{
this.thisLine = value;
}
}
public int Column
{
get
{
return charPositionInLine;
}
set
{
this.charPositionInLine = value;
}
}
public void Consume(ICharStream input)
{
int curChar = input.LA(1);
if (curChar == '\n')
{
thisLine++;
charPositionInLine = 0;
}
else {
charPositionInLine++;
}
input.Consume();
}
public String GetTokenName(int t)
{
if (t == -1) return "EOF";
//if ( atn.g!=null ) return atn.g.getTokenDisplayName(t);
return "'" + (char)t + "'";
}
}
/** When we hit an accept state in either the DFA or the ATN, we
* have to notify the character stream to start buffering characters
* via {@link IntStream#mark} and record the current state. The current sim state
* includes the current index into the input, the current line,
* and current character position in that line. Note that the Lexer is
* tracking the starting line and character position of the token. These
* variables track the "state" of the simulator when it hits an accept state.
*
* <p>We track these variables separately for the DFA and ATN simulation
* because the DFA simulation often has to fail over to the ATN
* simulation. If the ATN simulation fails, we need the DFA to fall
* back to its previously accepted state, if any. If the ATN succeeds,
* then the ATN does the accept and the DFA simulator that invoked it
* can simply return the predicted token type.</p>
*/
internal class SimState
{
public int index = -1;
public int line = 0;
public int charPos = -1;
public DFAState dfaState;
public void Reset()
{
index = -1;
line = 0;
charPos = -1;
dfaState = null;
}
}
}

@ -0,0 +1,275 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Represents an executor for a sequence of lexer actions which are traversed during
/// the matching operation of a lexer rule (token).
/// </summary>
/// <remarks>
/// Represents an executor for a sequence of lexer actions which are traversed during
/// the matching operation of a lexer rule (token).
/// <p>The executor tracks position information for position-dependent lexer actions
/// efficiently, ensuring that actions appearing only at the end of the rule do
/// not cause bloating of the
/// <see cref="Antlr4.Runtime.Dfa.DFA"/>
/// created for the lexer.</p>
/// </remarks>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal class LexerActionExecutor
{
[NotNull]
private readonly ILexerAction[] lexerActions;
/// <summary>
/// Caches the result of
/// <see cref="hashCode"/>
/// since the hash code is an element
/// of the performance-critical
/// <see cref="ATNConfig.GetHashCode()"/>
/// operation.
/// </summary>
private readonly int hashCode;
/// <summary>
/// Constructs an executor for a sequence of
/// <see cref="ILexerAction"/>
/// actions.
/// </summary>
/// <param name="lexerActions">The lexer actions to execute.</param>
public LexerActionExecutor(ILexerAction[] lexerActions)
{
this.lexerActions = lexerActions;
int hash = MurmurHash.Initialize();
foreach (ILexerAction lexerAction in lexerActions)
{
hash = MurmurHash.Update(hash, lexerAction);
}
this.hashCode = MurmurHash.Finish(hash, lexerActions.Length);
}
/// <summary>
/// Creates a
/// <see cref="LexerActionExecutor"/>
/// which executes the actions for
/// the input
/// <paramref name="lexerActionExecutor"/>
/// followed by a specified
/// <paramref name="lexerAction"/>
/// .
/// </summary>
/// <param name="lexerActionExecutor">
/// The executor for actions already traversed by
/// the lexer while matching a token within a particular
/// <see cref="ATNConfig"/>
/// . If this is
/// <see langword="null"/>
/// , the method behaves as though
/// it were an empty executor.
/// </param>
/// <param name="lexerAction">
/// The lexer action to execute after the actions
/// specified in
/// <paramref name="lexerActionExecutor"/>
/// .
/// </param>
/// <returns>
/// A
/// <see cref="LexerActionExecutor"/>
/// for executing the combined actions
/// of
/// <paramref name="lexerActionExecutor"/>
/// and
/// <paramref name="lexerAction"/>
/// .
/// </returns>
[return: NotNull]
public static Antlr4.Runtime.Atn.LexerActionExecutor Append(Antlr4.Runtime.Atn.LexerActionExecutor lexerActionExecutor, ILexerAction lexerAction)
{
if (lexerActionExecutor == null)
{
return new Antlr4.Runtime.Atn.LexerActionExecutor(new ILexerAction[] { lexerAction });
}
ILexerAction[] lexerActions = Arrays.CopyOf(lexerActionExecutor.lexerActions, lexerActionExecutor.lexerActions.Length + 1);
lexerActions[lexerActions.Length - 1] = lexerAction;
return new Antlr4.Runtime.Atn.LexerActionExecutor(lexerActions);
}
/// <summary>
/// Creates a
/// <see cref="LexerActionExecutor"/>
/// which encodes the current offset
/// for position-dependent lexer actions.
/// <p>Normally, when the executor encounters lexer actions where
/// <see cref="ILexerAction.IsPositionDependent()"/>
/// returns
/// <see langword="true"/>
/// , it calls
/// <see cref="Antlr4.Runtime.IIntStream.Seek(int)"/>
/// on the input
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// to set the input
/// position to the <em>end</em> of the current token. This behavior provides
/// for efficient DFA representation of lexer actions which appear at the end
/// of a lexer rule, even when the lexer rule matches a variable number of
/// characters.</p>
/// <p>Prior to traversing a match transition in the ATN, the current offset
/// from the token start index is assigned to all position-dependent lexer
/// actions which have not already been assigned a fixed offset. By storing
/// the offsets relative to the token start index, the DFA representation of
/// lexer actions which appear in the middle of tokens remains efficient due
/// to sharing among tokens of the same length, regardless of their absolute
/// position in the input stream.</p>
/// <p>If the current executor already has offsets assigned to all
/// position-dependent lexer actions, the method returns
/// <c>this</c>
/// .</p>
/// </summary>
/// <param name="offset">
/// The current offset to assign to all position-dependent
/// lexer actions which do not already have offsets assigned.
/// </param>
/// <returns>
/// A
/// <see cref="LexerActionExecutor"/>
/// which stores input stream offsets
/// for all position-dependent lexer actions.
/// </returns>
public virtual Antlr4.Runtime.Atn.LexerActionExecutor FixOffsetBeforeMatch(int offset)
{
ILexerAction[] updatedLexerActions = null;
for (int i = 0; i < lexerActions.Length; i++)
{
if (lexerActions[i].IsPositionDependent && !(lexerActions[i] is LexerIndexedCustomAction))
{
if (updatedLexerActions == null)
{
updatedLexerActions = (ILexerAction[])lexerActions.Clone();
}
updatedLexerActions[i] = new LexerIndexedCustomAction(offset, lexerActions[i]);
}
}
if (updatedLexerActions == null)
{
return this;
}
return new Antlr4.Runtime.Atn.LexerActionExecutor(updatedLexerActions);
}
/// <summary>Gets the lexer actions to be executed by this executor.</summary>
/// <remarks>Gets the lexer actions to be executed by this executor.</remarks>
/// <returns>The lexer actions to be executed by this executor.</returns>
[NotNull]
public virtual ILexerAction[] LexerActions
{
get
{
return lexerActions;
}
}
/// <summary>
/// Execute the actions encapsulated by this executor within the context of a
/// particular
/// <see cref="Antlr4.Runtime.Lexer"/>
/// .
/// <p>This method calls
/// <see cref="Antlr4.Runtime.IIntStream.Seek(int)"/>
/// to set the position of the
/// <paramref name="input"/>
///
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// prior to calling
/// <see cref="ILexerAction.Execute(Antlr4.Runtime.Lexer)"/>
/// on a position-dependent action. Before the
/// method returns, the input position will be restored to the same position
/// it was in when the method was invoked.</p>
/// </summary>
/// <param name="lexer">The lexer instance.</param>
/// <param name="input">
/// The input stream which is the source for the current token.
/// When this method is called, the current
/// <see cref="Antlr4.Runtime.IIntStream.Index()"/>
/// for
/// <paramref name="input"/>
/// should be the start of the following token, i.e. 1
/// character past the end of the current token.
/// </param>
/// <param name="startIndex">
/// The token start index. This value may be passed to
/// <see cref="Antlr4.Runtime.IIntStream.Seek(int)"/>
/// to set the
/// <paramref name="input"/>
/// position to the beginning
/// of the token.
/// </param>
public virtual void Execute(Lexer lexer, ICharStream input, int startIndex)
{
bool requiresSeek = false;
int stopIndex = input.Index;
try
{
foreach (ILexerAction lexerAction in lexerActions)
{
ILexerAction action = lexerAction;
if (action is LexerIndexedCustomAction)
{
int offset = ((LexerIndexedCustomAction)action).Offset;
input.Seek(startIndex + offset);
action = ((LexerIndexedCustomAction)action).Action;
requiresSeek = (startIndex + offset) != stopIndex;
}
else
{
if (action.IsPositionDependent)
{
input.Seek(stopIndex);
requiresSeek = false;
}
}
action.Execute(lexer);
}
}
finally
{
if (requiresSeek)
{
input.Seek(stopIndex);
}
}
}
public override int GetHashCode()
{
return this.hashCode;
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
else
{
if (!(obj is Antlr4.Runtime.Atn.LexerActionExecutor))
{
return false;
}
}
Antlr4.Runtime.Atn.LexerActionExecutor other = (Antlr4.Runtime.Atn.LexerActionExecutor)obj;
return hashCode == other.hashCode && Arrays.Equals(lexerActions, other.lexerActions);
}
}
}
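// Editorial sketch (not part of the ANTLR sources): executors are normally built one
// action at a time via Append while the ATN is simulated; the concrete actions below are
// illustrative only.
//
//   LexerActionExecutor exec = LexerActionExecutor.Append(null, LexerSkipAction.Instance);
//   exec = LexerActionExecutor.Append(exec, new LexerChannelAction(1));
//   // exec.LexerActions is now { skip, channel(1) }; exec.Execute(lexer, input, startIndex)
//   // runs both actions in order once the token has been matched.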

@ -0,0 +1,26 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal enum LexerActionType
{
Channel,
Custom,
Mode,
More,
PopMode,
PushMode,
Skip,
Type
}
}

@ -0,0 +1,132 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Implements the
/// <c>channel</c>
/// lexer action by calling
/// <see cref="Lexer.Channel"/>
/// with the assigned channel.
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal sealed class LexerChannelAction : ILexerAction
{
private readonly int channel;
/// <summary>
/// Constructs a new
/// <paramref name="channel"/>
/// action with the specified channel value.
/// </summary>
/// <param name="channel">
/// The channel value to pass to
/// <see cref="Lexer.Channel"/>
/// .
/// </param>
public LexerChannelAction(int channel)
{
this.channel = channel;
}
/// <summary>
/// Gets the channel to use for the
/// <see cref="Antlr4.Runtime.IToken"/>
/// created by the lexer.
/// </summary>
/// <returns>
/// The channel to use for the
/// <see cref="Antlr4.Runtime.IToken"/>
/// created by the lexer.
/// </returns>
public int Channel
{
get
{
return channel;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see cref="LexerActionType.Channel"/>
/// .
/// </returns>
public LexerActionType ActionType
{
get
{
return LexerActionType.Channel;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see langword="false"/>
/// .
/// </returns>
public bool IsPositionDependent
{
get
{
return false;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>This action is implemented by calling
/// <see cref="Lexer.Channel"/>
/// with the
/// value provided by
/// <see cref="Channel()"/>
/// .</p>
/// </summary>
public void Execute(Lexer lexer)
{
lexer.Channel = channel;
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, (int)(ActionType));
hash = MurmurHash.Update(hash, channel);
return MurmurHash.Finish(hash, 2);
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
else
{
if (!(obj is Antlr4.Runtime.Atn.LexerChannelAction))
{
return false;
}
}
return channel == ((Antlr4.Runtime.Atn.LexerChannelAction)obj).channel;
}
public override string ToString()
{
return string.Format("channel({0})", channel);
}
}
}
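// Editorial sketch (not part of the ANTLR sources): this action corresponds to a lexer
// command such as "-> channel(HIDDEN)" in a grammar; the channel number below is
// illustrative only.
//
//   ILexerAction toHidden = new LexerChannelAction(1);
//   toHidden.Execute(lexer);                                  // same effect as lexer.Channel = 1
//   bool same = new LexerChannelAction(1).Equals(toHidden);   // true: equality is by value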

@ -0,0 +1,164 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Executes a custom lexer action by calling
/// <see cref="Antlr4.Runtime.Recognizer{Symbol, ATNInterpreter}.Action(Antlr4.Runtime.RuleContext, int, int)"/>
/// with the
/// rule and action indexes assigned to the custom action. The implementation of
/// a custom action is added to the generated code for the lexer in an override
/// of
/// <see cref="Antlr4.Runtime.Recognizer{Symbol, ATNInterpreter}.Action(Antlr4.Runtime.RuleContext, int, int)"/>
/// when the grammar is compiled.
/// <p>This class may represent embedded actions created with the <code>{...}</code>
/// syntax in ANTLR 4, as well as actions created for lexer commands where the
/// command argument could not be evaluated when the grammar was compiled.</p>
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal sealed class LexerCustomAction : ILexerAction
{
private readonly int ruleIndex;
private readonly int actionIndex;
/// <summary>
/// Constructs a custom lexer action with the specified rule and action
/// indexes.
/// </summary>
/// <remarks>
/// Constructs a custom lexer action with the specified rule and action
/// indexes.
/// </remarks>
/// <param name="ruleIndex">
/// The rule index to use for calls to
/// <see cref="Antlr4.Runtime.Recognizer{Symbol, ATNInterpreter}.Action(Antlr4.Runtime.RuleContext, int, int)"/>
/// .
/// </param>
/// <param name="actionIndex">
/// The action index to use for calls to
/// <see cref="Antlr4.Runtime.Recognizer{Symbol, ATNInterpreter}.Action(Antlr4.Runtime.RuleContext, int, int)"/>
/// .
/// </param>
public LexerCustomAction(int ruleIndex, int actionIndex)
{
this.ruleIndex = ruleIndex;
this.actionIndex = actionIndex;
}
/// <summary>
/// Gets the rule index to use for calls to
/// <see cref="Antlr4.Runtime.Recognizer{Symbol, ATNInterpreter}.Action(Antlr4.Runtime.RuleContext, int, int)"/>
/// .
/// </summary>
/// <returns>The rule index for the custom action.</returns>
public int RuleIndex
{
get
{
return ruleIndex;
}
}
/// <summary>
/// Gets the action index to use for calls to
/// <see cref="Antlr4.Runtime.Recognizer{Symbol, ATNInterpreter}.Action(Antlr4.Runtime.RuleContext, int, int)"/>
/// .
/// </summary>
/// <returns>The action index for the custom action.</returns>
public int ActionIndex
{
get
{
return actionIndex;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see cref="LexerActionType.Custom"/>
/// .
/// </returns>
public LexerActionType ActionType
{
get
{
return LexerActionType.Custom;
}
}
/// <summary>Gets whether the lexer action is position-dependent.</summary>
/// <remarks>
/// Gets whether the lexer action is position-dependent. Position-dependent
/// actions may have different semantics depending on the
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// index at the time the action is executed.
/// <p>Custom actions are position-dependent since they may represent a
/// user-defined embedded action which makes calls to methods like
/// <see cref="Antlr4.Runtime.Lexer.Text()"/>
/// .</p>
/// </remarks>
/// <returns>
/// This method returns
/// <see langword="true"/>
/// .
/// </returns>
public bool IsPositionDependent
{
get
{
return true;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>Custom actions are implemented by calling
/// <see cref="Antlr4.Runtime.Recognizer{Symbol, ATNInterpreter}.Action(Antlr4.Runtime.RuleContext, int, int)"/>
/// with the
/// appropriate rule and action indexes.</p>
/// </summary>
public void Execute(Lexer lexer)
{
lexer.Action(null, ruleIndex, actionIndex);
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, (int)(ActionType));
hash = MurmurHash.Update(hash, ruleIndex);
hash = MurmurHash.Update(hash, actionIndex);
return MurmurHash.Finish(hash, 3);
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
else
{
if (!(obj is Antlr4.Runtime.Atn.LexerCustomAction))
{
return false;
}
}
Antlr4.Runtime.Atn.LexerCustomAction other = (Antlr4.Runtime.Atn.LexerCustomAction)obj;
return ruleIndex == other.ruleIndex && actionIndex == other.actionIndex;
}
}
}
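// Editorial sketch (not part of the ANTLR sources): an embedded action in a grammar, e.g.
//
//   WS : [ \t]+ { HandleWs(); } ;
//
// is represented by a LexerCustomAction whose rule/action indexes are dispatched by the
// generated lexer, roughly:
//
//   public override void Action(RuleContext _localctx, int ruleIndex, int actionIndex)
//   {
//       if (ruleIndex == 0 && actionIndex == 0) HandleWs();   // indexes and HandleWs() are hypothetical
//   }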

@ -0,0 +1,179 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// This implementation of
/// <see cref="ILexerAction"/>
/// is used for tracking input offsets
/// for position-dependent actions within a
/// <see cref="LexerActionExecutor"/>
/// .
/// <p>This action is not serialized as part of the ATN, and is only required for
/// position-dependent lexer actions which appear at a location other than the
/// end of a rule. For more information about DFA optimizations employed for
/// lexer actions, see
/// <see cref="LexerActionExecutor.Append(LexerActionExecutor, ILexerAction)"/>
/// and
/// <see cref="LexerActionExecutor.FixOffsetBeforeMatch(int)"/>
/// .</p>
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal sealed class LexerIndexedCustomAction : ILexerAction
{
private readonly int offset;
private readonly ILexerAction action;
/// <summary>
/// Constructs a new indexed custom action by associating a character offset
/// with a
/// <see cref="ILexerAction"/>
/// .
/// <p>Note: This class is only required for lexer actions for which
/// <see cref="ILexerAction.IsPositionDependent()"/>
/// returns
/// <see langword="true"/>
/// .</p>
/// </summary>
/// <param name="offset">
/// The offset into the input
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// , relative to
/// the token start index, at which the specified lexer action should be
/// executed.
/// </param>
/// <param name="action">
/// The lexer action to execute at a particular offset in the
/// input
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// .
/// </param>
public LexerIndexedCustomAction(int offset, ILexerAction action)
{
this.offset = offset;
this.action = action;
}
/// <summary>
/// Gets the location in the input
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// at which the lexer
/// action should be executed. The value is interpreted as an offset relative
/// to the token start index.
/// </summary>
/// <returns>
/// The location in the input
/// <see cref="Antlr4.Runtime.ICharStream"/>
/// at which the lexer
/// action should be executed.
/// </returns>
public int Offset
{
get
{
return offset;
}
}
/// <summary>Gets the lexer action to execute.</summary>
/// <remarks>Gets the lexer action to execute.</remarks>
/// <returns>
/// A
/// <see cref="ILexerAction"/>
/// object which executes the lexer action.
/// </returns>
[NotNull]
public ILexerAction Action
{
get
{
return action;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns the result of calling
/// <see cref="ActionType()"/>
/// on the
/// <see cref="ILexerAction"/>
/// returned by
/// <see cref="Action()"/>
/// .
/// </returns>
public LexerActionType ActionType
{
get
{
return action.ActionType;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see langword="true"/>
/// .
/// </returns>
public bool IsPositionDependent
{
get
{
return true;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>This method calls
/// <see cref="Execute(Antlr4.Runtime.Lexer)"/>
/// on the result of
/// <see cref="Action()"/>
/// using the provided
/// <paramref name="lexer"/>
/// .</p>
/// </summary>
public void Execute(Lexer lexer)
{
// assume the input stream position was properly set by the calling code
action.Execute(lexer);
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, offset);
hash = MurmurHash.Update(hash, action);
return MurmurHash.Finish(hash, 2);
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
else
{
if (!(obj is Antlr4.Runtime.Atn.LexerIndexedCustomAction))
{
return false;
}
}
Antlr4.Runtime.Atn.LexerIndexedCustomAction other = (Antlr4.Runtime.Atn.LexerIndexedCustomAction)obj;
return offset == other.offset && action.Equals(other.action);
}
}
}
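// Editorial sketch (not part of the ANTLR sources): indexed actions normally come from
// LexerActionExecutor.FixOffsetBeforeMatch rather than being constructed directly.
//
//   ILexerAction custom = new LexerCustomAction(0, 0);                  // position-dependent
//   LexerActionExecutor exec = LexerActionExecutor.Append(null, custom);
//   LexerActionExecutor adjusted = exec.FixOffsetBeforeMatch(3);
//   // adjusted now holds a LexerIndexedCustomAction(3, custom); during Execute the input
//   // is Seek()'d to startIndex + 3 before the wrapped action runs.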

@ -0,0 +1,130 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Implements the
/// <c>mode</c>
/// lexer action by calling
/// <see cref="Antlr4.Runtime.Lexer.Mode(int)"/>
/// with
/// the assigned mode.
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal sealed class LexerModeAction : ILexerAction
{
private readonly int mode;
/// <summary>
/// Constructs a new
/// <paramref name="mode"/>
/// action with the specified mode value.
/// </summary>
/// <param name="mode">
/// The mode value to pass to
/// <see cref="Antlr4.Runtime.Lexer.Mode(int)"/>
/// .
/// </param>
public LexerModeAction(int mode)
{
this.mode = mode;
}
/// <summary>Get the lexer mode this action should transition the lexer to.</summary>
/// <remarks>Get the lexer mode this action should transition the lexer to.</remarks>
/// <returns>
/// The lexer mode for this
/// <c>mode</c>
/// command.
/// </returns>
public int Mode
{
get
{
return mode;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see cref="LexerActionType.Mode"/>
/// .
/// </returns>
public LexerActionType ActionType
{
get
{
return LexerActionType.Mode;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see langword="false"/>
/// .
/// </returns>
public bool IsPositionDependent
{
get
{
return false;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>This action is implemented by calling
/// <see cref="Antlr4.Runtime.Lexer.Mode(int)"/>
/// with the
/// value provided by
/// <see cref="Mode()"/>
/// .</p>
/// </summary>
public void Execute(Lexer lexer)
{
lexer.Mode(mode);
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, (int)(ActionType));
hash = MurmurHash.Update(hash, mode);
return MurmurHash.Finish(hash, 2);
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
else
{
if (!(obj is Antlr4.Runtime.Atn.LexerModeAction))
{
return false;
}
}
return mode == ((Antlr4.Runtime.Atn.LexerModeAction)obj).mode;
}
public override string ToString()
{
return string.Format("mode({0})", mode);
}
}
}

@ -0,0 +1,101 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Implements the
/// <c>more</c>
/// lexer action by calling
/// <see cref="Antlr4.Runtime.Lexer.More()"/>
/// .
/// <p>The
/// <c>more</c>
/// command does not have any parameters, so this action is
/// implemented as a singleton instance exposed by
/// <see cref="Instance"/>
/// .</p>
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal sealed class LexerMoreAction : ILexerAction
{
/// <summary>Provides a singleton instance of this parameterless lexer action.</summary>
/// <remarks>Provides a singleton instance of this parameterless lexer action.</remarks>
public static readonly Antlr4.Runtime.Atn.LexerMoreAction Instance = new Antlr4.Runtime.Atn.LexerMoreAction();
/// <summary>
/// Constructs the singleton instance of the lexer
/// <c>more</c>
/// command.
/// </summary>
private LexerMoreAction()
{
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see cref="LexerActionType.More"/>
/// .
/// </returns>
public LexerActionType ActionType
{
get
{
return LexerActionType.More;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see langword="false"/>
/// .
/// </returns>
public bool IsPositionDependent
{
get
{
return false;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>This action is implemented by calling
/// <see cref="Antlr4.Runtime.Lexer.More()"/>
/// .</p>
/// </summary>
public void Execute(Lexer lexer)
{
lexer.More();
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, (int)(ActionType));
return MurmurHash.Finish(hash, 1);
}
public override bool Equals(object obj)
{
return obj == this;
}
public override string ToString()
{
return "more";
}
}
}

@ -0,0 +1,101 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Implements the
/// <c>popMode</c>
/// lexer action by calling
/// <see cref="Antlr4.Runtime.Lexer.PopMode()"/>
/// .
/// <p>The
/// <c>popMode</c>
/// command does not have any parameters, so this action is
/// implemented as a singleton instance exposed by
/// <see cref="Instance"/>
/// .</p>
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal sealed class LexerPopModeAction : ILexerAction
{
/// <summary>Provides a singleton instance of this parameterless lexer action.</summary>
/// <remarks>Provides a singleton instance of this parameterless lexer action.</remarks>
public static readonly Antlr4.Runtime.Atn.LexerPopModeAction Instance = new Antlr4.Runtime.Atn.LexerPopModeAction();
/// <summary>
/// Constructs the singleton instance of the lexer
/// <c>popMode</c>
/// command.
/// </summary>
private LexerPopModeAction()
{
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see cref="LexerActionType.PopMode"/>
/// .
/// </returns>
public LexerActionType ActionType
{
get
{
return LexerActionType.PopMode;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see langword="false"/>
/// .
/// </returns>
public bool IsPositionDependent
{
get
{
return false;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>This action is implemented by calling
/// <see cref="Antlr4.Runtime.Lexer.PopMode()"/>
/// .</p>
/// </summary>
public void Execute(Lexer lexer)
{
lexer.PopMode();
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, (int)(ActionType));
return MurmurHash.Finish(hash, 1);
}
public override bool Equals(object obj)
{
return obj == this;
}
public override string ToString()
{
return "popMode";
}
}
}

@ -0,0 +1,129 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Implements the
/// <c>pushMode</c>
/// lexer action by calling
/// <see cref="Antlr4.Runtime.Lexer.PushMode(int)"/>
/// with the assigned mode.
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal sealed class LexerPushModeAction : ILexerAction
{
private readonly int mode;
/// <summary>
/// Constructs a new
/// <c>pushMode</c>
/// action with the specified mode value.
/// </summary>
/// <param name="mode">
/// The mode value to pass to
/// <see cref="Antlr4.Runtime.Lexer.PushMode(int)"/>
/// .
/// </param>
public LexerPushModeAction(int mode)
{
this.mode = mode;
}
/// <summary>Get the lexer mode this action should transition the lexer to.</summary>
/// <remarks>Get the lexer mode this action should transition the lexer to.</remarks>
/// <returns>
/// The lexer mode for this
/// <c>pushMode</c>
/// command.
/// </returns>
public int Mode
{
get
{
return mode;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see cref="LexerActionType.PushMode"/>
/// .
/// </returns>
public LexerActionType ActionType
{
get
{
return LexerActionType.PushMode;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see langword="false"/>
/// .
/// </returns>
public bool IsPositionDependent
{
get
{
return false;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>This action is implemented by calling
/// <see cref="Antlr4.Runtime.Lexer.PushMode(int)"/>
/// with the
/// value provided by
/// <see cref="Mode()"/>
/// .</p>
/// </summary>
public void Execute(Lexer lexer)
{
lexer.PushMode(mode);
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, (int)(ActionType));
hash = MurmurHash.Update(hash, mode);
return MurmurHash.Finish(hash, 2);
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
else
{
if (!(obj is Antlr4.Runtime.Atn.LexerPushModeAction))
{
return false;
}
}
return mode == ((Antlr4.Runtime.Atn.LexerPushModeAction)obj).mode;
}
public override string ToString()
{
return string.Format("pushMode({0})", mode);
}
}
}
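// Editorial sketch (not part of the ANTLR sources): pushMode/popMode actions mirror the
// grammar commands "-> pushMode(SOME_MODE)" and "-> popMode"; the mode number below is
// illustrative only.
//
//   ILexerAction enterMode = new LexerPushModeAction(1);
//   ILexerAction leaveMode = LexerPopModeAction.Instance;
//   enterMode.Execute(lexer);   // lexer.PushMode(1)
//   leaveMode.Execute(lexer);   // lexer.PopMode()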

@ -0,0 +1,101 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Implements the
/// <c>skip</c>
/// lexer action by calling
/// <see cref="Antlr4.Runtime.Lexer.Skip()"/>
/// .
/// <p>The
/// <c>skip</c>
/// command does not have any parameters, so this action is
/// implemented as a singleton instance exposed by
/// <see cref="Instance"/>
/// .</p>
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal sealed class LexerSkipAction : ILexerAction
{
/// <summary>Provides a singleton instance of this parameterless lexer action.</summary>
/// <remarks>Provides a singleton instance of this parameterless lexer action.</remarks>
public static readonly Antlr4.Runtime.Atn.LexerSkipAction Instance = new Antlr4.Runtime.Atn.LexerSkipAction();
/// <summary>
/// Constructs the singleton instance of the lexer
/// <c>skip</c>
/// command.
/// </summary>
private LexerSkipAction()
{
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see cref="LexerActionType.Skip"/>
/// .
/// </returns>
public LexerActionType ActionType
{
get
{
return LexerActionType.Skip;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see langword="false"/>
/// .
/// </returns>
public bool IsPositionDependent
{
get
{
return false;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>This action is implemented by calling
/// <see cref="Antlr4.Runtime.Lexer.Skip()"/>
/// .</p>
/// </summary>
public void Execute(Lexer lexer)
{
lexer.Skip();
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, (int)(ActionType));
return MurmurHash.Finish(hash, 1);
}
public override bool Equals(object obj)
{
return obj == this;
}
public override string ToString()
{
return "skip";
}
}
}
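// Editorial note (not part of the ANTLR sources): like More and PopMode, Skip takes no
// parameters, so a single shared instance is reused and equality is reference equality:
//
//   LexerSkipAction.Instance.Equals(LexerSkipAction.Instance);   // true
//   LexerSkipAction.Instance.Equals(LexerMoreAction.Instance);   // false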

@ -0,0 +1,125 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Implements the
/// <c>type</c>
/// lexer action by calling
/// <see cref="Lexer.Type"/>
/// with the assigned type.
/// </summary>
/// <author>Sam Harwell</author>
/// <since>4.2</since>
internal class LexerTypeAction : ILexerAction
{
private readonly int type;
/// <summary>
/// Constructs a new
/// <paramref name="type"/>
/// action with the specified token type value.
/// </summary>
/// <param name="type">
/// The type to assign to the token using
/// <see cref="Lexer.Type"/>
/// .
/// </param>
public LexerTypeAction(int type)
{
this.type = type;
}
/// <summary>Gets the type to assign to a token created by the lexer.</summary>
/// <remarks>Gets the type to assign to a token created by the lexer.</remarks>
/// <returns>The type to assign to a token created by the lexer.</returns>
public virtual int Type
{
get
{
return type;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see cref="LexerActionType.Type"/>
/// .
/// </returns>
public virtual LexerActionType ActionType
{
get
{
return LexerActionType.Type;
}
}
/// <summary><inheritDoc/></summary>
/// <returns>
/// This method returns
/// <see langword="false"/>
/// .
/// </returns>
public virtual bool IsPositionDependent
{
get
{
return false;
}
}
/// <summary>
/// <inheritDoc/>
/// <p>This action is implemented by calling
/// <see cref="Lexer.Type"/>
/// with the
/// value provided by
/// <see cref="Type()"/>
/// .</p>
/// </summary>
public virtual void Execute(Lexer lexer)
{
lexer.Type = type;
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize();
hash = MurmurHash.Update(hash, (int)(ActionType));
hash = MurmurHash.Update(hash, type);
return MurmurHash.Finish(hash, 2);
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
else
{
if (!(obj is Antlr4.Runtime.Atn.LexerTypeAction))
{
return false;
}
}
return type == ((Antlr4.Runtime.Atn.LexerTypeAction)obj).type;
}
public override string ToString()
{
return string.Format("type({0})", type);
}
}
}

@ -0,0 +1,56 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// This class represents profiling event information for tracking the lookahead
/// depth required in order to make a prediction.
/// </summary>
/// <remarks>
/// This class represents profiling event information for tracking the lookahead
/// depth required in order to make a prediction.
/// </remarks>
/// <since>4.3</since>
internal class LookaheadEventInfo : DecisionEventInfo
{
/// <summary>
/// Constructs a new instance of the
/// <see cref="LookaheadEventInfo"/>
/// class with
/// the specified detailed lookahead information.
/// </summary>
/// <param name="decision">The decision number</param>
/// <param name="state">
/// The final simulator state containing the necessary
/// information to determine the result of a prediction, or
/// <see langword="null"/>
/// if
/// the final state is not available
/// </param>
/// <param name="input">The input token stream</param>
/// <param name="startIndex">The start index for the current prediction</param>
/// <param name="stopIndex">The index at which the prediction was finally made</param>
/// <param name="fullCtx">
///
/// <see langword="true"/>
/// if the current lookahead is part of an LL
/// prediction; otherwise,
/// <see langword="false"/>
/// if the current lookahead is part of
/// an SLL prediction
/// </param>
public LookaheadEventInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex, bool fullCtx)
: base(decision, state, input, startIndex, stopIndex, fullCtx)
{
}
}
}

@ -0,0 +1,27 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>Mark the end of a * or + loop.</summary>
/// <remarks>Mark the end of a * or + loop.</remarks>
internal sealed class LoopEndState : ATNState
{
public ATNState loopBackState;
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.LoopEnd;
}
}
}
}

@ -0,0 +1,41 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
namespace Antlr4.Runtime.Atn
{
internal class MergeCache
{
Dictionary<PredictionContext, Dictionary<PredictionContext, PredictionContext>> data = new Dictionary<PredictionContext, Dictionary<PredictionContext, PredictionContext>>();
public PredictionContext Get(PredictionContext a, PredictionContext b)
{
Dictionary<PredictionContext, PredictionContext> first;
if (!data.TryGetValue(a, out first))
return null;
PredictionContext value;
if (first.TryGetValue(b, out value))
return value;
else
return null;
}
public void Put(PredictionContext a, PredictionContext b, PredictionContext value)
{
Dictionary<PredictionContext, PredictionContext> first;
if (!data.TryGetValue(a, out first))
{
first = new Dictionary<PredictionContext, PredictionContext>();
data[a] = first;
}
first[b] = value;
}
}
}
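// Editorial sketch (not part of the ANTLR sources): callers key the cache on the (a, b)
// pair being merged, in the style used by the PredictionContext merge routines in this
// change:
//
//   PredictionContext cached = mergeCache.Get(a, b) ?? mergeCache.Get(b, a);
//   if (cached == null)
//   {
//       cached = ComputeMergedContext(a, b);   // ComputeMergedContext is hypothetical
//       mergeCache.Put(a, b, cached);
//   }
//   return cached;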

@ -0,0 +1,39 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal sealed class NotSetTransition : SetTransition
{
public NotSetTransition(ATNState target, IntervalSet set)
: base(target, set)
{
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.NOT_SET;
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !base.Matches(symbol, minVocabSymbol, maxVocabSymbol);
}
public override string ToString()
{
return '~' + base.ToString();
}
}
}

@ -0,0 +1,188 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections.Generic;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/**
* This class provides access to specific and aggregate statistics gathered
* during profiling of a parser.
*
* @since 4.3
*/
internal class ParseInfo
{
protected readonly ProfilingATNSimulator atnSimulator;
public ParseInfo(ProfilingATNSimulator atnSimulator)
{
this.atnSimulator = atnSimulator;
}
/**
* Gets an array of {@link DecisionInfo} instances containing the profiling
* information gathered for each decision in the ATN.
*
* @return An array of {@link DecisionInfo} instances, indexed by decision
* number.
*/
public DecisionInfo[] getDecisionInfo()
{
return atnSimulator.getDecisionInfo();
}
/**
* Gets the decision numbers for decisions that required one or more
* full-context predictions during parsing. These are decisions for which
* {@link DecisionInfo#LL_Fallback} is non-zero.
*
* @return A list of decision numbers which required one or more
* full-context predictions during parsing.
*/
public List<int> getLLDecisions()
{
DecisionInfo[] decisions = atnSimulator.getDecisionInfo();
List<int> LL = new List<int>();
for (int i = 0; i < decisions.Length; i++)
{
long fallBack = decisions[i].LL_Fallback;
if (fallBack > 0) LL.Add(i);
}
return LL;
}
/**
* Gets the total time spent during prediction across all decisions made
* during parsing. This value is the sum of
* {@link DecisionInfo#timeInPrediction} for all decisions.
*/
public long getTotalTimeInPrediction()
{
DecisionInfo[] decisions = atnSimulator.getDecisionInfo();
long t = 0;
for (int i = 0; i < decisions.Length; i++)
{
t += decisions[i].timeInPrediction;
}
return t;
}
/**
* Gets the total number of SLL lookahead operations across all decisions
* made during parsing. This value is the sum of
* {@link DecisionInfo#SLL_TotalLook} for all decisions.
*/
public long getTotalSLLLookaheadOps()
{
DecisionInfo[] decisions = atnSimulator.getDecisionInfo();
long k = 0;
for (int i = 0; i < decisions.Length; i++)
{
k += decisions[i].SLL_TotalLook;
}
return k;
}
/**
* Gets the total number of LL lookahead operations across all decisions
* made during parsing. This value is the sum of
* {@link DecisionInfo#LL_TotalLook} for all decisions.
*/
public long getTotalLLLookaheadOps()
{
DecisionInfo[] decisions = atnSimulator.getDecisionInfo();
long k = 0;
for (int i = 0; i < decisions.Length; i++)
{
k += decisions[i].LL_TotalLook;
}
return k;
}
/**
* Gets the total number of ATN lookahead operations for SLL prediction
* across all decisions made during parsing.
*/
public long getTotalSLLATNLookaheadOps()
{
DecisionInfo[] decisions = atnSimulator.getDecisionInfo();
long k = 0;
for (int i = 0; i < decisions.Length; i++)
{
k += decisions[i].SLL_ATNTransitions;
}
return k;
}
/**
* Gets the total number of ATN lookahead operations for LL prediction
* across all decisions made during parsing.
*/
public long getTotalLLATNLookaheadOps()
{
DecisionInfo[] decisions = atnSimulator.getDecisionInfo();
long k = 0;
for (int i = 0; i < decisions.Length; i++)
{
k += decisions[i].LL_ATNTransitions;
}
return k;
}
/**
* Gets the total number of ATN lookahead operations for SLL and LL
* prediction across all decisions made during parsing.
*
* <p>
* This value is the sum of {@link #getTotalSLLATNLookaheadOps} and
* {@link #getTotalLLATNLookaheadOps}.</p>
*/
public long getTotalATNLookaheadOps()
{
DecisionInfo[] decisions = atnSimulator.getDecisionInfo();
long k = 0;
for (int i = 0; i < decisions.Length; i++)
{
k += decisions[i].SLL_ATNTransitions;
k += decisions[i].LL_ATNTransitions;
}
return k;
}
/**
* Gets the total number of DFA states stored in the DFA cache for all
* decisions in the ATN.
*/
public int getDFASize()
{
int n = 0;
DFA[] decisionToDFA = atnSimulator.decisionToDFA;
for (int i = 0; i < decisionToDFA.Length; i++)
{
n += getDFASize(i);
}
return n;
}
/**
* Gets the total number of DFA states stored in the DFA cache for a
* particular decision.
*/
public int getDFASize(int decision)
{
DFA decisionToDFA = atnSimulator.decisionToDFA[decision];
return decisionToDFA.states.Count;
}
}
}
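// Editorial sketch (not part of the ANTLR sources): a ParseInfo wraps a profiling
// simulator and is then queried per decision; profilingSimulator below is assumed to be
// an existing ProfilingATNSimulator instance.
//
//   ParseInfo info = new ParseInfo(profilingSimulator);
//   foreach (int decision in info.getLLDecisions())
//   {
//       DecisionInfo di = info.getDecisionInfo()[decision];
//       System.Console.WriteLine(decision + ": " + di.LL_Fallback + " LL fallbacks");
//   }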

The diff for one file is not shown because of its large size.
@ -0,0 +1,37 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Start of
/// <c>(A|B|...)+</c>
/// loop. Technically a decision state, but
/// we don't use it for code generation; somebody might need it, so I'm defining
/// it for completeness. In reality, the
/// <see cref="PlusLoopbackState"/>
/// node is the
/// real decision-making node for
/// <c>A+</c>
/// .
/// </summary>
internal sealed class PlusBlockStartState : BlockStartState
{
public PlusLoopbackState loopBackState;
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.PlusBlockStart;
}
}
}
}

@ -0,0 +1,31 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// Decision state for
/// <c>A+</c>
/// and
/// <c>(A|B)+</c>
/// . It has two transitions:
/// one to the loop back to start of the block and one to exit.
/// </summary>
internal sealed class PlusLoopbackState : DecisionState
{
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.PlusLoopBack;
}
}
}
}

@ -0,0 +1,58 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <author>Sam Harwell</author>
internal sealed class PrecedencePredicateTransition : AbstractPredicateTransition
{
public readonly int precedence;
public PrecedencePredicateTransition(ATNState target, int precedence)
: base(target)
{
this.precedence = precedence;
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.PRECEDENCE;
}
}
public override bool IsEpsilon
{
get
{
return true;
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return false;
}
public SemanticContext.PrecedencePredicate Predicate
{
get
{
return new SemanticContext.PrecedencePredicate(precedence);
}
}
public override string ToString()
{
return precedence + " >= _p";
}
}
}

@ -0,0 +1,84 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// This class represents profiling event information for semantic predicate
/// evaluations which occur during prediction.
/// </summary>
/// <remarks>
/// This class represents profiling event information for semantic predicate
/// evaluations which occur during prediction.
/// </remarks>
/// <seealso cref="ParserATNSimulator.EvalSemanticContext(Dfa.PredPrediction[], ParserRuleContext, bool)"/>
/// <since>4.3</since>
internal class PredicateEvalInfo : DecisionEventInfo
{
/// <summary>The semantic context which was evaluated.</summary>
/// <remarks>The semantic context which was evaluated.</remarks>
public readonly SemanticContext semctx;
/// <summary>
/// The alternative number for the decision which is guarded by the semantic
/// context
/// <see cref="semctx"/>
/// . Note that other ATN
/// configurations may predict the same alternative which are guarded by
/// other semantic contexts and/or
/// <see cref="SemanticContext.NONE"/>
/// .
/// </summary>
public readonly int predictedAlt;
/// <summary>
/// The result of evaluating the semantic context
/// <see cref="semctx"/>
/// .
/// </summary>
public readonly bool evalResult;
/// <summary>
/// Constructs a new instance of the
/// <see cref="PredicateEvalInfo"/>
/// class with the
/// specified detailed predicate evaluation information.
/// </summary>
/// <param name="state">The simulator state</param>
/// <param name="decision">The decision number</param>
/// <param name="input">The input token stream</param>
/// <param name="startIndex">The start index for the current prediction</param>
/// <param name="stopIndex">
/// The index at which the predicate evaluation was
/// triggered. Note that the input stream may be reset to other positions for
/// the actual evaluation of individual predicates.
/// </param>
/// <param name="semctx">The semantic context which was evaluated</param>
/// <param name="evalResult">The results of evaluating the semantic context</param>
/// <param name="predictedAlt">
/// The alternative number for the decision which is
/// guarded by the semantic context
/// <paramref name="semctx"/>
/// . See
/// <see cref="predictedAlt"/>
/// for more information.
/// </param>
/// <seealso cref="ParserATNSimulator.EvalSemanticContext(SemanticContext, ParserRuleContext, int, bool)"/>
/// <seealso cref="SemanticContext.Eval"/>
public PredicateEvalInfo(SimulatorState state, int decision, ITokenStream input, int startIndex, int stopIndex, SemanticContext semctx, bool evalResult, int predictedAlt)
: base(decision, state, input, startIndex, stopIndex, state.useContext)
{
this.semctx = semctx;
this.evalResult = evalResult;
this.predictedAlt = predictedAlt;
}
}
}

@ -0,0 +1,77 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// TODO: this is old comment:
/// A tree of semantic predicates from the grammar AST if label==SEMPRED.
/// </summary>
/// <remarks>
/// TODO: this is old comment:
/// A tree of semantic predicates from the grammar AST if label==SEMPRED.
/// In the ATN, labels will always be exactly one predicate, but the DFA
/// may have to combine a bunch of them as it collects predicates from
/// multiple ATN configurations into a single DFA state.
/// </remarks>
internal sealed class PredicateTransition : AbstractPredicateTransition
{
public readonly int ruleIndex;
public readonly int predIndex;
public readonly bool isCtxDependent;
public PredicateTransition(ATNState target, int ruleIndex, int predIndex, bool isCtxDependent)
: base(target)
{
// e.g., $i ref in pred
this.ruleIndex = ruleIndex;
this.predIndex = predIndex;
this.isCtxDependent = isCtxDependent;
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.PREDICATE;
}
}
public override bool IsEpsilon
{
get
{
return true;
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return false;
}
public SemanticContext.Predicate Predicate
{
get
{
return new SemanticContext.Predicate(ruleIndex, predIndex, isCtxDependent);
}
}
[return: NotNull]
public override string ToString()
{
return "pred_" + ruleIndex + ":" + predIndex;
}
}
}

@ -0,0 +1,586 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections.Generic;
using System.Text;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal abstract class PredictionContext
{
public static readonly int EMPTY_RETURN_STATE = int.MaxValue;
public static readonly EmptyPredictionContext EMPTY = new EmptyPredictionContext();
private static readonly int INITIAL_HASH = 1;
protected internal static int CalculateEmptyHashCode()
{
int hash = MurmurHash.Initialize(INITIAL_HASH);
hash = MurmurHash.Finish(hash, 0);
return hash;
}
protected internal static int CalculateHashCode(PredictionContext parent, int returnState)
{
int hash = MurmurHash.Initialize(INITIAL_HASH);
hash = MurmurHash.Update(hash, parent);
hash = MurmurHash.Update(hash, returnState);
hash = MurmurHash.Finish(hash, 2);
return hash;
}
protected internal static int CalculateHashCode(PredictionContext[] parents, int[] returnStates)
{
int hash = MurmurHash.Initialize(INITIAL_HASH);
foreach (PredictionContext parent in parents)
{
hash = MurmurHash.Update(hash, parent);
}
foreach (int returnState in returnStates)
{
hash = MurmurHash.Update(hash, returnState);
}
hash = MurmurHash.Finish(hash, 2 * parents.Length);
return hash;
}
private readonly int cachedHashCode;
protected internal PredictionContext(int cachedHashCode)
{
this.cachedHashCode = cachedHashCode;
}
public static PredictionContext FromRuleContext(ATN atn, RuleContext outerContext)
{
if (outerContext == null)
outerContext = ParserRuleContext.EMPTY;
if (outerContext.Parent == null || outerContext == ParserRuleContext.EMPTY)
return PredictionContext.EMPTY;
PredictionContext parent = PredictionContext.FromRuleContext(atn, outerContext.Parent);
ATNState state = atn.states[outerContext.invokingState];
RuleTransition transition = (RuleTransition)state.Transition(0);
return parent.GetChild(transition.followState.stateNumber);
}
public abstract int Size
{
get;
}
public abstract PredictionContext GetParent(int index);
public abstract int GetReturnState(int index);
public virtual bool IsEmpty
{
get
{
return this == EMPTY;
}
}
public virtual bool HasEmptyPath
{
get
{
return GetReturnState(Size - 1) == EMPTY_RETURN_STATE;
}
}
public sealed override int GetHashCode()
{
return cachedHashCode;
}
internal static PredictionContext Merge(PredictionContext a, PredictionContext b, bool rootIsWildcard, MergeCache mergeCache)
{
if (a == b || a.Equals(b))
{
return a;
}
if (a is SingletonPredictionContext && b is SingletonPredictionContext)
{
return MergeSingletons((SingletonPredictionContext)a,
(SingletonPredictionContext)b,
rootIsWildcard, mergeCache);
}
// At least one of a or b is array
// If one is $ and rootIsWildcard, return $ as * wildcard
if (rootIsWildcard)
{
if (a is EmptyPredictionContext)
return a;
if (b is EmptyPredictionContext)
return b;
}
// convert singleton so both are arrays to normalize
if (a is SingletonPredictionContext)
{
a = new ArrayPredictionContext((SingletonPredictionContext)a);
}
if (b is SingletonPredictionContext)
{
b = new ArrayPredictionContext((SingletonPredictionContext)b);
}
return MergeArrays((ArrayPredictionContext)a, (ArrayPredictionContext)b,
rootIsWildcard, mergeCache);
}
public static PredictionContext MergeSingletons(
SingletonPredictionContext a,
SingletonPredictionContext b,
bool rootIsWildcard,
MergeCache mergeCache)
{
if (mergeCache != null)
{
PredictionContext previous = mergeCache.Get(a, b);
if (previous != null) return previous;
previous = mergeCache.Get(b, a);
if (previous != null) return previous;
}
PredictionContext rootMerge = MergeRoot(a, b, rootIsWildcard);
if (rootMerge != null)
{
if (mergeCache != null) mergeCache.Put(a, b, rootMerge);
return rootMerge;
}
if (a.returnState == b.returnState)
{ // a == b
PredictionContext parent = Merge(a.parent, b.parent, rootIsWildcard, mergeCache);
// if parent is same as existing a or b parent or reduced to a parent, return it
if (parent == a.parent) return a; // ax + bx = ax, if a=b
if (parent == b.parent) return b; // ax + bx = bx, if a=b
// else: ax + ay = a'[x,y]
// merge parents x and y, giving array node with x,y then remainders
// of those graphs. dup a, a' points at merged array
// new joined parent so create new singleton pointing to it, a'
PredictionContext a_ = SingletonPredictionContext.Create(parent, a.returnState);
if (mergeCache != null) mergeCache.Put(a, b, a_);
return a_;
}
else { // a != b payloads differ
// see if we can collapse parents due to $+x parents if local ctx
int[] payloads = new int[2];
PredictionContext[] parents = new PredictionContext[2];
PredictionContext pc;
PredictionContext singleParent = null;
if (a == b || (a.parent != null && a.parent.Equals(b.parent)))
{ // ax + bx = [a,b]x
singleParent = a.parent;
}
if (singleParent != null)
{ // parents are same
// sort payloads and use same parent
if (a.returnState > b.returnState)
{
payloads[0] = b.returnState;
payloads[1] = a.returnState;
}
else {
payloads[0] = a.returnState;
payloads[1] = b.returnState;
}
parents[0] = singleParent;
parents[1] = singleParent;
pc = new ArrayPredictionContext(parents, payloads);
if (mergeCache != null)
mergeCache.Put(a, b, pc);
return pc;
}
// parents differ and can't merge them. Just pack together
// into array; can't merge.
// ax + by = [ax,by]
// sort by payload
if (a.returnState > b.returnState)
{
payloads[0] = b.returnState;
payloads[1] = a.returnState;
parents[0] = b.parent;
parents[1] = a.parent;
}
else {
payloads[0] = a.returnState;
payloads[1] = b.returnState;
parents[0] = a.parent;
parents[1] = b.parent;
}
pc = new ArrayPredictionContext(parents, payloads);
if (mergeCache != null)
mergeCache.Put(a, b, pc);
return pc;
}
}
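/// <summary>
/// Merges two array contexts by walking their sorted return-state arrays in
/// merge-sort fashion, combining equal payloads, trimming the result, and
/// returning <paramref name="a"/> or <paramref name="b"/> unchanged when the merge is a no-op.
/// </summary>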
public static PredictionContext MergeArrays(
ArrayPredictionContext a,
ArrayPredictionContext b,
bool rootIsWildcard,
MergeCache mergeCache)
{
if (mergeCache != null)
{
PredictionContext previous = mergeCache.Get(a, b);
if (previous != null)
return previous;
previous = mergeCache.Get(b, a);
if (previous != null)
return previous;
}
// merge sorted payloads a + b => M
int i = 0; // walks a
int j = 0; // walks b
int k = 0; // walks target M array
int[] mergedReturnStates =
new int[a.returnStates.Length + b.returnStates.Length];
PredictionContext[] mergedParents =
new PredictionContext[a.returnStates.Length + b.returnStates.Length];
// walk and merge to yield mergedParents, mergedReturnStates
while (i < a.returnStates.Length && j < b.returnStates.Length)
{
PredictionContext a_parent = a.parents[i];
PredictionContext b_parent = b.parents[j];
if (a.returnStates[i] == b.returnStates[j])
{
// same payload (stack tops are equal), must yield merged singleton
int payload = a.returnStates[i];
// $+$ = $
bool both_dollar = payload == EMPTY_RETURN_STATE &&
a_parent == null && b_parent == null;
bool ax_ax = (a_parent != null && b_parent != null) &&
a_parent.Equals(b_parent); // ax+ax -> ax
if (both_dollar || ax_ax ) {
mergedParents[k] = a_parent; // choose left
mergedReturnStates[k] = payload;
}
else { // ax+ay -> a'[x,y]
PredictionContext mergedParent =
Merge(a_parent, b_parent, rootIsWildcard, mergeCache);
mergedParents[k] = mergedParent;
mergedReturnStates[k] = payload;
}
i++; // hop over left one as usual
j++; // but also skip one in right side since we merge
}
else if (a.returnStates[i] < b.returnStates[j])
{ // copy a[i] to M
mergedParents[k] = a_parent;
mergedReturnStates[k] = a.returnStates[i];
i++;
}
else { // b > a, copy b[j] to M
mergedParents[k] = b_parent;
mergedReturnStates[k] = b.returnStates[j];
j++;
}
k++;
}
// copy over any payloads remaining in either array
if (i < a.returnStates.Length)
{
for (int p = i; p < a.returnStates.Length; p++)
{
mergedParents[k] = a.parents[p];
mergedReturnStates[k] = a.returnStates[p];
k++;
}
}
else {
for (int p = j; p < b.returnStates.Length; p++)
{
mergedParents[k] = b.parents[p];
mergedReturnStates[k] = b.returnStates[p];
k++;
}
}
// trim merged if we combined a few that had same stack tops
if (k < mergedParents.Length)
{ // write index < last position; trim
if (k == 1)
{ // for just one merged element, return singleton top
PredictionContext a_ = SingletonPredictionContext.Create(mergedParents[0], mergedReturnStates[0]);
if (mergeCache != null) mergeCache.Put(a, b, a_);
return a_;
}
mergedParents = Arrays.CopyOf(mergedParents, k);
mergedReturnStates = Arrays.CopyOf(mergedReturnStates, k);
}
PredictionContext M = new ArrayPredictionContext(mergedParents, mergedReturnStates);
// if we created same array as a or b, return that instead
// TODO: track whether this is possible above during merge sort for speed
if (M.Equals(a))
{
if (mergeCache != null)
mergeCache.Put(a, b, a);
return a;
}
if (M.Equals(b))
{
if (mergeCache != null)
mergeCache.Put(a, b, b);
return b;
}
CombineCommonParents(mergedParents);
if (mergeCache != null)
mergeCache.Put(a, b, M);
return M;
}
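/// <summary>Replaces equal parent contexts with a single shared instance so duplicate parents collapse to one object.</summary>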
protected static void CombineCommonParents(PredictionContext[] parents)
{
Dictionary<PredictionContext, PredictionContext> uniqueParents = new Dictionary<PredictionContext, PredictionContext>();
for (int p = 0; p < parents.Length; p++)
{
PredictionContext parent = parents[p];
if (parent!=null && !uniqueParents.ContainsKey(parent))
{ // don't replace
uniqueParents.Put(parent, parent);
}
}
for (int p = 0; p < parents.Length; p++)
{
PredictionContext parent = parents[p];
if (parent!=null)
parents[p] = uniqueParents.Get(parent);
}
}
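/// <summary>
/// Handles merges involving the empty context: with a wildcard root the empty context
/// absorbs the other operand; otherwise <c>$</c> combines with a singleton into a
/// two-element array. Returns <see langword="null"/> when neither shortcut applies.
/// </summary>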
public static PredictionContext MergeRoot(SingletonPredictionContext a,
SingletonPredictionContext b,
bool rootIsWildcard)
{
if (rootIsWildcard)
{
if (a == PredictionContext.EMPTY)
return PredictionContext.EMPTY; // * + b = *
if (b == PredictionContext.EMPTY)
return PredictionContext.EMPTY; // a + * = *
}
else {
if (a == EMPTY && b == EMPTY) return EMPTY; // $ + $ = $
if (a == EMPTY)
{ // $ + x = [$,x]
int[] payloads = { b.returnState, EMPTY_RETURN_STATE };
PredictionContext[] parents = { b.parent, null };
PredictionContext joined =
new ArrayPredictionContext(parents, payloads);
return joined;
}
if (b == EMPTY)
{ // x + $ = [$,x] ($ is always first if present)
int[] payloads = { a.returnState, EMPTY_RETURN_STATE };
PredictionContext[] parents = { a.parent, null };
PredictionContext joined =
new ArrayPredictionContext(parents, payloads);
return joined;
}
}
return null;
}
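/// <summary>
/// Interns a context graph against <paramref name="contextCache"/>, rebuilding a node only
/// when one of its parents changed and tracking processed nodes in <paramref name="visited"/>.
/// </summary>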
public static PredictionContext GetCachedContext(PredictionContext context, PredictionContextCache contextCache, PredictionContext.IdentityHashMap visited)
{
if (context.IsEmpty)
{
return context;
}
PredictionContext existing = visited.Get(context);
if (existing != null)
{
return existing;
}
existing = contextCache.Get(context);
if (existing != null)
{
visited.Put(context, existing);
return existing;
}
bool changed = false;
PredictionContext[] parents = new PredictionContext[context.Size];
for (int i = 0; i < parents.Length; i++)
{
PredictionContext parent = GetCachedContext(context.GetParent(i), contextCache, visited);
if (changed || parent != context.GetParent(i))
{
if (!changed)
{
parents = new PredictionContext[context.Size];
for (int j = 0; j < context.Size; j++)
{
parents[j] = context.GetParent(j);
}
changed = true;
}
parents[i] = parent;
}
}
if (!changed)
{
contextCache.Add(context);
visited.Put(context, context);
return context;
}
PredictionContext updated;
if (parents.Length == 0)
{
updated = EMPTY;
}
else if (parents.Length == 1)
{
updated = SingletonPredictionContext.Create(parents[0], context.GetReturnState(0));
}
else {
ArrayPredictionContext arrayPredictionContext = (ArrayPredictionContext)context;
updated = new ArrayPredictionContext(parents, arrayPredictionContext.returnStates);
}
contextCache.Add(updated);
visited.Put(updated, updated);
visited.Put(context, updated);
return updated;
}
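/// <summary>Returns a singleton context that pushes <paramref name="returnState"/> on top of this context.</summary>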
public virtual PredictionContext GetChild(int returnState)
{
return new SingletonPredictionContext(this, returnState);
}
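/// <summary>
/// Produces one string per distinct path through the context graph, primarily for debugging;
/// rule names are used when a recognizer is supplied, otherwise return states are printed.
/// </summary>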
public virtual string[] ToStrings(IRecognizer recognizer, int currentState)
{
return ToStrings(recognizer, PredictionContext.EMPTY, currentState);
}
public virtual string[] ToStrings(IRecognizer recognizer, PredictionContext stop, int currentState)
{
List<string> result = new List<string>();
for (int perm = 0; ; perm++)
{
int offset = 0;
bool last = true;
PredictionContext p = this;
int stateNumber = currentState;
StringBuilder localBuffer = new StringBuilder();
localBuffer.Append("[");
while (!p.IsEmpty && p != stop)
{
int index = 0;
if (p.Size > 0)
{
int bits = 1;
while ((1 << bits) < p.Size)
{
bits++;
}
int mask = (1 << bits) - 1;
index = (perm >> offset) & mask;
last &= index >= p.Size - 1;
if (index >= p.Size)
{
goto outer_continue;
}
offset += bits;
}
if (recognizer != null)
{
if (localBuffer.Length > 1)
{
// first char is '[', if more than that this isn't the first rule
localBuffer.Append(' ');
}
ATN atn = recognizer.Atn;
ATNState s = atn.states[stateNumber];
string ruleName = recognizer.RuleNames[s.ruleIndex];
localBuffer.Append(ruleName);
}
else
{
if (p.GetReturnState(index) != EMPTY_RETURN_STATE)
{
if (!p.IsEmpty)
{
if (localBuffer.Length > 1)
{
// first char is '[', if more than that this isn't the first rule
localBuffer.Append(' ');
}
localBuffer.Append(p.GetReturnState(index));
}
}
}
stateNumber = p.GetReturnState(index);
p = p.GetParent(index);
}
localBuffer.Append("]");
result.Add(localBuffer.ToString());
if (last)
{
break;
}
outer_continue:;
}
return result.ToArray();
}
internal sealed class IdentityHashMap : Dictionary<PredictionContext, PredictionContext>
{
public IdentityHashMap()
: base(PredictionContext.IdentityEqualityComparator.Instance)
{
}
}
internal sealed class IdentityEqualityComparator : EqualityComparer<PredictionContext>
{
public static readonly PredictionContext.IdentityEqualityComparator Instance = new PredictionContext.IdentityEqualityComparator();
private IdentityEqualityComparator()
{
}
public override int GetHashCode(PredictionContext obj)
{
return obj.GetHashCode();
}
public override bool Equals(PredictionContext a, PredictionContext b)
{
return a == b;
}
}
}
}


@ -0,0 +1,50 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal class PredictionContextCache
{
protected readonly Dictionary<PredictionContext, PredictionContext> cache =
new Dictionary<PredictionContext, PredictionContext>();
/** Add a context to the cache and return it. If the context already exists,
* return that one instead and do not add a new context to the cache.
* Protect shared cache from unsafe thread access.
*/
public PredictionContext Add(PredictionContext ctx)
{
if (ctx == PredictionContext.EMPTY)
return PredictionContext.EMPTY;
PredictionContext existing = cache.Get(ctx);
if (existing != null)
{
return existing;
}
cache.Put(ctx, ctx);
return ctx;
}
public PredictionContext Get(PredictionContext ctx)
{
return cache.Get(ctx);
}
public int Count
{
get
{
return cache.Count;
}
}
}
}


@ -0,0 +1,890 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections.Generic;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>
/// This enumeration defines the prediction modes available in ANTLR 4 along with
/// utility methods for analyzing configuration sets for conflicts and/or
/// ambiguities.
/// </summary>
/// <remarks>
/// This enumeration defines the prediction modes available in ANTLR 4 along with
/// utility methods for analyzing configuration sets for conflicts and/or
/// ambiguities.
/// </remarks>
[System.Serializable]
internal sealed class PredictionMode
{
/// <summary>The SLL(*) prediction mode.</summary>
/// <remarks>
/// The SLL(*) prediction mode. This prediction mode ignores the current
/// parser context when making predictions. This is the fastest prediction
/// mode, and provides correct results for many grammars. This prediction
/// mode is more powerful than the prediction mode provided by ANTLR 3, but
/// may result in syntax errors for grammar and input combinations which are
/// not SLL.
/// <p>
/// When using this prediction mode, the parser will either return a correct
/// parse tree (i.e. the same parse tree that would be returned with the
/// <see cref="LL"/>
/// prediction mode), or it will report a syntax error. If a
/// syntax error is encountered when using the
/// <see cref="SLL"/>
/// prediction mode,
/// it may be due to either an actual syntax error in the input or indicate
/// that the particular combination of grammar and input requires the more
/// powerful
/// <see cref="LL"/>
/// prediction abilities to complete successfully.</p>
/// <p>
/// This prediction mode does not provide any guarantees for prediction
/// behavior for syntactically-incorrect inputs.</p>
/// </remarks>
public static readonly PredictionMode SLL = new PredictionMode();
/// <summary>The LL(*) prediction mode.</summary>
/// <remarks>
/// The LL(*) prediction mode. This prediction mode allows the current parser
/// context to be used for resolving SLL conflicts that occur during
/// prediction. This is the fastest prediction mode that guarantees correct
/// parse results for all combinations of grammars with syntactically correct
/// inputs.
/// <p>
/// When using this prediction mode, the parser will make correct decisions
/// for all syntactically-correct grammar and input combinations. However, in
/// cases where the grammar is truly ambiguous this prediction mode might not
/// report a precise answer for <em>exactly which</em> alternatives are
/// ambiguous.</p>
/// <p>
/// This prediction mode does not provide any guarantees for prediction
/// behavior for syntactically-incorrect inputs.</p>
/// </remarks>
public static readonly PredictionMode LL = new PredictionMode();
/// <summary>The LL(*) prediction mode with exact ambiguity detection.</summary>
/// <remarks>
/// The LL(*) prediction mode with exact ambiguity detection. In addition to
/// the correctness guarantees provided by the
/// <see cref="LL"/>
/// prediction mode,
/// this prediction mode instructs the prediction algorithm to determine the
/// complete and exact set of ambiguous alternatives for every ambiguous
/// decision encountered while parsing.
/// <p>
/// This prediction mode may be used for diagnosing ambiguities during
/// grammar development. Due to the performance overhead of calculating sets
/// of ambiguous alternatives, this prediction mode should be avoided when
/// the exact results are not necessary.</p>
/// <p>
/// This prediction mode does not provide any guarantees for prediction
/// behavior for syntactically-incorrect inputs.</p>
/// </remarks>
public static readonly PredictionMode LL_EXACT_AMBIG_DETECTION = new PredictionMode();
/// <summary>A Map that uses just the state and the stack context as the key.</summary>
/// <remarks>A Map that uses just the state and the stack context as the key.</remarks>
internal class AltAndContextMap : Dictionary<ATNConfig, BitSet>
{
public AltAndContextMap()
: base(PredictionMode.AltAndContextConfigEqualityComparator.Instance)
{
}
}
private sealed class AltAndContextConfigEqualityComparator : EqualityComparer<ATNConfig>
{
public static readonly PredictionMode.AltAndContextConfigEqualityComparator Instance = new PredictionMode.AltAndContextConfigEqualityComparator();
private AltAndContextConfigEqualityComparator()
{
}
/// <summary>
/// The hash code is only a function of the
/// <see cref="ATNState.stateNumber"/>
/// and
/// <see cref="ATNConfig.context"/>
/// .
/// </summary>
public override int GetHashCode(ATNConfig o)
{
int hashCode = MurmurHash.Initialize(7);
hashCode = MurmurHash.Update(hashCode, o.state.stateNumber);
hashCode = MurmurHash.Update(hashCode, o.context);
hashCode = MurmurHash.Finish(hashCode, 2);
return hashCode;
}
public override bool Equals(ATNConfig a, ATNConfig b)
{
if (a == b)
{
return true;
}
if (a == null || b == null)
{
return false;
}
return a.state.stateNumber == b.state.stateNumber && a.context.Equals(b.context);
}
}
/// <summary>Computes the SLL prediction termination condition.</summary>
/// <remarks>
/// Computes the SLL prediction termination condition.
/// <p>
/// This method computes the SLL prediction termination condition for both of
/// the following cases.</p>
/// <ul>
/// <li>The usual SLL+LL fallback upon SLL conflict</li>
/// <li>Pure SLL without LL fallback</li>
/// </ul>
/// <p><strong>COMBINED SLL+LL PARSING</strong></p>
/// <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
/// ensured regardless of how the termination condition is computed by this
/// method. Due to the substantially higher cost of LL prediction, the
/// prediction should only fall back to LL when the additional lookahead
/// cannot lead to a unique SLL prediction.</p>
/// <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
/// conflicting subsets should fall back to full LL, even if the
/// configuration sets don't resolve to the same alternative (e.g.
/// <c>{1,2}</c> and <c>{3,4}</c>). If there is at least one non-conflicting
/// configuration, SLL could continue with the hopes that more lookahead will
/// resolve via one of those non-conflicting configurations.</p>
/// <p>Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
/// stops when it sees only conflicting configuration subsets. In contrast,
/// full LL keeps going when there is uncertainty.</p>
/// <p><strong>HEURISTIC</strong></p>
/// <p>As a heuristic, we stop prediction when we see any conflicting subset
/// unless we see a state that only has one alternative associated with it.
/// The single-alt-state thing lets prediction continue upon rules like
/// (otherwise, it would admit defeat too soon):</p>
/// <p>
/// <c>[12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;</c>
/// </p>
/// <p>When the ATN simulation reaches the state before
/// <c>';'</c>
/// , it has a
/// DFA state that looks like:
/// <c>[12|1|[], 6|2|[], 12|2|[]]</c>
/// . Naturally
/// <c>12|1|[]</c>
/// and
/// <c>12|2|[]</c>
/// conflict, but we cannot stop
/// processing this node because alternative two has another way to continue,
/// via
/// <c>[6|2|[]]</c>
/// .</p>
/// <p>It also lets us continue for this rule:</p>
/// <p>
/// <c>[1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;</c>
/// </p>
/// <p>After matching input A, we reach the stop state for rule A, state 1.
/// State 8 is the state right before B. Clearly alternatives 1 and 2
/// conflict and no amount of further lookahead will separate the two.
/// However, alternative 3 will be able to continue and so we do not stop
/// working on this state. In the previous example, we're concerned with
/// states associated with the conflicting alternatives. Here alt 3 is not
/// associated with the conflicting configs, but since we can continue
/// looking for input reasonably, don't declare the state done.</p>
/// <p><strong>PURE SLL PARSING</strong></p>
/// <p>To handle pure SLL parsing, all we have to do is make sure that we
/// combine stack contexts for configurations that differ only by semantic
/// predicate. From there, we can do the usual SLL termination heuristic.</p>
/// <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
/// <p>SLL decisions don't evaluate predicates until after they reach DFA stop
/// states because they need to create the DFA cache that works in all
/// semantic situations. In contrast, full LL evaluates predicates collected
/// during start state computation so it can ignore predicates thereafter.
/// This means that SLL termination detection can totally ignore semantic
/// predicates.</p>
/// <p>Implementation-wise,
/// <see cref="ATNConfigSet"/>
/// combines stack contexts but not
/// semantic predicate contexts so we might see two configurations like the
/// following.</p>
/// <p>
/// <c>{(s, 1, x, {}), (s, 1, x', {p})}</c></p>
/// <p>Before testing these configurations against others, we have to merge
/// <c>x</c>
/// and
/// <c>x'</c>
/// (without modifying the existing configurations).
/// For example, we test
/// <c>(x+x')==x''</c>
/// when looking for conflicts in
/// the following configurations.</p>
/// <p>
/// <c>{(s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</c></p>
/// <p>If the configuration set has predicates (as indicated by
/// <see cref="ATNConfigSet.hasSemanticContext"/>
/// ), this algorithm makes a copy of
/// the configurations to strip out all of the predicates so that a standard
/// <see cref="ATNConfigSet"/>
/// will merge everything ignoring predicates.</p>
/// </remarks>
public static bool HasSLLConflictTerminatingPrediction(PredictionMode mode, ATNConfigSet configSet)
{
if (AllConfigsInRuleStopStates(configSet.configs))
{
return true;
}
// pure SLL mode parsing
if (mode == PredictionMode.SLL)
{
// Don't bother with combining configs from different semantic
// contexts if we can fail over to full LL; costs more time
// since we'll often fail over anyway.
if (configSet.hasSemanticContext)
{
// dup configs, tossing out semantic predicates
ATNConfigSet dup = new ATNConfigSet();
foreach (ATNConfig c in configSet.configs)
{
dup.Add(new ATNConfig(c, SemanticContext.NONE));
}
configSet = dup;
}
}
// now we have combined contexts for configs with dissimilar preds
// pure SLL or combined SLL+LL mode parsing
ICollection<BitSet> altsets = GetConflictingAltSubsets(configSet.configs);
bool heuristic = HasConflictingAltSet(altsets) && !HasStateAssociatedWithOneAlt(configSet.configs);
return heuristic;
}
/// <summary>
/// Checks if any configuration in
/// <paramref name="configs"/>
/// is in a
/// <see cref="RuleStopState"/>
/// . Configurations meeting this condition have reached
/// the end of the decision rule (local context) or end of start rule (full
/// context).
/// </summary>
/// <param name="configs">the configuration set to test</param>
/// <returns>
///
/// <see langword="true"/>
/// if any configuration in
/// <paramref name="configs"/>
/// is in a
/// <see cref="RuleStopState"/>
/// , otherwise
/// <see langword="false"/>
/// </returns>
public static bool HasConfigInRuleStopState(IEnumerable<ATNConfig> configs)
{
foreach (ATNConfig c in configs)
{
if (c.state is RuleStopState)
{
return true;
}
}
return false;
}
/// <summary>
/// Checks if all configurations in
/// <paramref name="configs"/>
/// are in a
/// <see cref="RuleStopState"/>
/// . Configurations meeting this condition have reached
/// the end of the decision rule (local context) or end of start rule (full
/// context).
/// </summary>
/// <param name="configs">the configuration set to test</param>
/// <returns>
///
/// <see langword="true"/>
/// if all configurations in
/// <paramref name="configs"/>
/// are in a
/// <see cref="RuleStopState"/>
/// , otherwise
/// <see langword="false"/>
/// </returns>
public static bool AllConfigsInRuleStopStates(IEnumerable<ATNConfig> configs)
{
foreach (ATNConfig config in configs)
{
if (!(config.state is RuleStopState))
{
return false;
}
}
return true;
}
/// <summary>Full LL prediction termination.</summary>
/// <remarks>
/// Full LL prediction termination.
/// <p>Can we stop looking ahead during ATN simulation or is there some
/// uncertainty as to which alternative we will ultimately pick, after
/// consuming more input? Even if there are partial conflicts, we might know
/// that everything is going to resolve to the same minimum alternative. That
/// means we can stop since no more lookahead will change that fact. On the
/// other hand, there might be multiple conflicts that resolve to different
/// minimums. That means we need more look ahead to decide which of those
/// alternatives we should predict.</p>
/// <p>The basic idea is to split the set of configurations
/// <c>C</c>
/// , into
/// conflicting subsets
/// <c>(s, _, ctx, _)</c>
/// and singleton subsets with
/// non-conflicting configurations. Two configurations conflict if they have
/// identical
/// <see cref="ATNConfig.state"/>
/// and
/// <see cref="ATNConfig.context"/>
/// values
/// but different
/// <see cref="ATNConfig.alt"/>
/// value, e.g.
/// <c>(s, i, ctx, _)</c>
/// and
/// <c>(s, j, ctx, _)</c>
/// for
/// <c>i!=j</c>
/// .</p>
/// <p/>
/// Reduce these configuration subsets to the set of possible alternatives.
/// You can compute the alternative subsets in one pass as follows:
/// <p/>
/// <c>A_s,ctx = {i | (s, i, ctx, _)}</c> for each configuration in
/// <c>C</c>
/// holding
/// <c>s</c>
/// and
/// <c>ctx</c>
/// fixed.
/// <p/>
/// Or in pseudo-code, for each configuration
/// <c>c</c>
/// in
/// <c>C</c>
/// :
/// <pre>
/// map[c] U= c.
/// <see cref="ATNConfig.alt">getAlt()</see>
/// # map hash/equals uses s and x, not
/// alt and not pred
/// </pre>
/// <p>The values in
/// <c>map</c>
/// are the set of
/// <c>A_s,ctx</c>
/// sets.</p>
/// <p>If
/// <c>|A_s,ctx|=1</c>
/// then there is no conflict associated with
/// <c>s</c>
/// and
/// <c>ctx</c>
/// .</p>
/// <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
/// the union of these alternative subsets is a singleton, then no amount of
/// more lookahead will help us. We will always pick that alternative. If,
/// however, there is more than one alternative, then we are uncertain which
/// alternative to predict and must continue looking for resolution. We may
/// or may not discover an ambiguity in the future, even if there are no
/// conflicting subsets this round.</p>
/// <p>The biggest sin is to terminate early because it means we've made a
/// decision but were uncertain as to the eventual outcome. We haven't used
/// enough lookahead. On the other hand, announcing a conflict too late is no
/// big deal; you will still have the conflict. It's just inefficient. It
/// might even look until the end of file.</p>
/// <p>No special consideration for semantic predicates is required because
/// predicates are evaluated on-the-fly for full LL prediction, ensuring that
/// no configuration contains a semantic context during the termination
/// check.</p>
/// <p><strong>CONFLICTING CONFIGS</strong></p>
/// <p>Two configurations
/// <c>(s, i, x)</c>
/// and
/// <c>(s, j, x')</c>
/// , conflict
/// when
/// <c>i!=j</c>
/// but
/// <c>x=x'</c>
/// . Because we merge all
/// <c>(s, i, _)</c>
/// configurations together, that means that there are at
/// most
/// <c>n</c>
/// configurations associated with state
/// <c>s</c>
/// for
/// <c>n</c>
/// possible alternatives in the decision. The merged stacks
/// complicate the comparison of configuration contexts
/// <c>x</c>
/// and
/// <c>x'</c>
/// . Sam checks to see if one is a subset of the other by calling
/// merge and checking to see if the merged result is either
/// <c>x</c>
/// or
/// <c>x'</c>
/// . If the
/// <c>x</c>
/// associated with lowest alternative
/// <c>i</c>
/// is the superset, then
/// <c>i</c>
/// is the only possible prediction since the
/// others resolve to
/// <c>min(i)</c>
/// as well. However, if
/// <c>x</c>
/// is
/// associated with
/// <c>j&gt;i</c>
/// then at least one stack configuration for
/// <c>j</c>
/// is not in conflict with alternative
/// <c>i</c>
/// . The algorithm
/// should keep going, looking for more lookahead due to the uncertainty.</p>
/// <p>For simplicity, I'm doing an equality check between
/// <c>x</c>
/// and
/// <c>x'</c>
/// that lets the algorithm continue to consume lookahead longer
/// than necessary. The reason I like the equality is of course the
/// simplicity but also because that is the test you need to detect the
/// alternatives that are actually in conflict.</p>
/// <p><strong>CONTINUE/STOP RULE</strong></p>
/// <p>Continue if union of resolved alternative sets from non-conflicting and
/// conflicting alternative subsets has more than one alternative. We are
/// uncertain about which alternative to predict.</p>
/// <p>The complete set of alternatives,
/// <c>[i for (_,i,_)]</c>
/// , tells us which
/// alternatives are still in the running for the amount of input we've
/// consumed at this point. The conflicting sets let us strip away
/// configurations that won't lead to more states because we resolve
/// conflicts to the configuration with a minimum alternate for the
/// conflicting set.</p>
/// <p><strong>CASES</strong></p>
/// <ul>
/// <li>no conflicts and more than 1 alternative in set =&gt; continue</li>
/// <li>
/// <c>(s, 1, x)</c>
/// ,
/// <c>(s, 2, x)</c>
/// ,
/// <c>(s, 3, z)</c>
/// ,
/// <c>(s', 1, y)</c>
/// ,
/// <c>(s', 2, y)</c>
/// yields non-conflicting set
/// <c>{3}</c> U conflicting sets
/// <c>min({1,2})</c> U <c>min({1,2})</c> = <c>{1,3}</c> =&gt; continue
/// </li>
/// <li>
/// <c>(s, 1, x)</c>
/// ,
/// <c>(s, 2, x)</c>
/// ,
/// <c>(s', 1, y)</c>
/// ,
/// <c>(s', 2, y)</c>
/// ,
/// <c>(s'', 1, z)</c>
/// yields non-conflicting set
/// <c>{1}</c> U conflicting sets
/// <c>min({1,2})</c> U <c>min({1,2})</c> = <c>{1}</c> =&gt; stop and predict 1</li>
/// <li>
/// <c>(s, 1, x)</c>
/// ,
/// <c>(s, 2, x)</c>
/// ,
/// <c>(s', 1, y)</c>
/// ,
/// <c>(s', 2, y)</c>
/// yields conflicting, reduced sets
/// <c>{1}</c> U <c>{1}</c> = <c>{1}</c> =&gt; stop and predict 1, can announce
/// ambiguity <c>{1,2}</c></li>
/// <li>
/// <c>(s, 1, x)</c>
/// ,
/// <c>(s, 2, x)</c>
/// ,
/// <c>(s', 2, y)</c>
/// ,
/// <c>(s', 3, y)</c>
/// yields conflicting, reduced sets
/// <c>{1}</c> U <c>{2}</c> = <c>{1,2}</c> =&gt; continue</li>
/// <li>
/// <c>(s, 1, x)</c>
/// ,
/// <c>(s, 2, x)</c>
/// ,
/// <c>(s', 3, y)</c>
/// ,
/// <c>(s', 4, y)</c>
/// yields conflicting, reduced sets
/// <c>{1}</c> U <c>{3}</c> = <c>{1,3}</c> =&gt; continue</li>
/// </ul>
/// <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
/// <p>If all states report the same conflicting set of alternatives, then we
/// know we have the exact ambiguity set.</p>
/// <p><code>|A_<em>i</em>|&gt;1</code> and
/// <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
/// <p>In other words, we continue examining lookahead until all
/// <c>A_i</c>
/// have more than one alternative and all
/// <c>A_i</c>
/// are the same. If
/// <c>A={{1,2}, {1,3}}</c>, then regular LL prediction would terminate
/// because the resolved set is <c>{1}</c>. To determine what the real
/// ambiguity is, we have to know whether the ambiguity is between one and
/// two or one and three so we keep going. We can only stop prediction when
/// we need exact ambiguity detection when the sets look like
/// <c>A={{1,2}}</c> or <c>{{1,2},{1,2}}</c>, etc...</p>
/// </remarks>
public static int ResolvesToJustOneViableAlt(IEnumerable<BitSet> altsets)
{
return GetSingleViableAlt(altsets);
}
/// <summary>
/// Determines if every alternative subset in
/// <paramref name="altsets"/>
/// contains more
/// than one alternative.
/// </summary>
/// <param name="altsets">a collection of alternative subsets</param>
/// <returns>
///
/// <see langword="true"/>
/// if every
/// <see cref="Antlr4.Runtime.Sharpen.BitSet"/>
/// in
/// <paramref name="altsets"/>
/// has
/// <see cref="Antlr4.Runtime.Sharpen.BitSet.Cardinality()">cardinality</see>
/// &gt; 1, otherwise
/// <see langword="false"/>
/// </returns>
public static bool AllSubsetsConflict(IEnumerable<BitSet> altsets)
{
return !HasNonConflictingAltSet(altsets);
}
/// <summary>
/// Determines if any single alternative subset in
/// <paramref name="altsets"/>
/// contains
/// exactly one alternative.
/// </summary>
/// <param name="altsets">a collection of alternative subsets</param>
/// <returns>
///
/// <see langword="true"/>
/// if
/// <paramref name="altsets"/>
/// contains a
/// <see cref="Antlr4.Runtime.Sharpen.BitSet"/>
/// with
/// <see cref="Antlr4.Runtime.Sharpen.BitSet.Cardinality()">cardinality</see>
/// 1, otherwise
/// <see langword="false"/>
/// </returns>
public static bool HasNonConflictingAltSet(IEnumerable<BitSet> altsets)
{
foreach (BitSet alts in altsets)
{
if (alts.Cardinality() == 1)
{
return true;
}
}
return false;
}
/// <summary>
/// Determines if any single alternative subset in
/// <paramref name="altsets"/>
/// contains
/// more than one alternative.
/// </summary>
/// <param name="altsets">a collection of alternative subsets</param>
/// <returns>
///
/// <see langword="true"/>
/// if
/// <paramref name="altsets"/>
/// contains a
/// <see cref="Antlr4.Runtime.Sharpen.BitSet"/>
/// with
/// <see cref="Antlr4.Runtime.Sharpen.BitSet.Cardinality()">cardinality</see>
/// &gt; 1, otherwise
/// <see langword="false"/>
/// </returns>
public static bool HasConflictingAltSet(IEnumerable<BitSet> altsets)
{
foreach (BitSet alts in altsets)
{
if (alts.Cardinality() > 1)
{
return true;
}
}
return false;
}
/// <summary>
/// Determines if every alternative subset in
/// <paramref name="altsets"/>
/// is equivalent.
/// </summary>
/// <param name="altsets">a collection of alternative subsets</param>
/// <returns>
///
/// <see langword="true"/>
/// if every member of
/// <paramref name="altsets"/>
/// is equal to the
/// others, otherwise
/// <see langword="false"/>
/// </returns>
public static bool AllSubsetsEqual(IEnumerable<BitSet> altsets)
{
IEnumerator<BitSet> it = altsets.GetEnumerator();
it.MoveNext();
BitSet first = it.Current;
while (it.MoveNext())
{
BitSet next = it.Current;
if (!next.Equals(first))
{
return false;
}
}
return true;
}
/// <summary>
/// Returns the unique alternative predicted by all alternative subsets in
/// <paramref name="altsets"/>
/// . If no such alternative exists, this method returns
/// <see cref="ATN.INVALID_ALT_NUMBER"/>
/// .
/// </summary>
/// <param name="altsets">a collection of alternative subsets</param>
public static int GetUniqueAlt(IEnumerable<BitSet> altsets)
{
BitSet all = GetAlts(altsets);
if (all.Cardinality() == 1)
{
return all.NextSetBit(0);
}
return ATN.INVALID_ALT_NUMBER;
}
/// <summary>
/// Gets the complete set of represented alternatives for a collection of
/// alternative subsets.
/// </summary>
/// <remarks>
/// Gets the complete set of represented alternatives for a collection of
/// alternative subsets. This method returns the union of each
/// <see cref="Antlr4.Runtime.Sharpen.BitSet"/>
/// in
/// <paramref name="altsets"/>
/// .
/// </remarks>
/// <param name="altsets">a collection of alternative subsets</param>
/// <returns>
/// the set of represented alternatives in
/// <paramref name="altsets"/>
/// </returns>
public static BitSet GetAlts(IEnumerable<BitSet> altsets)
{
BitSet all = new BitSet();
foreach (BitSet alts in altsets)
{
all.Or(alts);
}
return all;
}
/// <summary>This function gets the conflicting alt subsets from a configuration set.</summary>
/// <remarks>
/// This function gets the conflicting alt subsets from a configuration set.
/// For each configuration
/// <c>c</c>
/// in
/// <paramref name="configs"/>
/// :
/// <pre>
/// map[c] U= c.
/// <see cref="ATNConfig.alt">getAlt()</see>
/// # map hash/equals uses s and x, not
/// alt and not pred
/// </pre>
/// </remarks>
[return: NotNull]
public static ICollection<BitSet> GetConflictingAltSubsets(IEnumerable<ATNConfig> configs)
{
PredictionMode.AltAndContextMap configToAlts = new PredictionMode.AltAndContextMap();
foreach (ATNConfig c in configs)
{
BitSet alts;
if (!configToAlts.TryGetValue(c, out alts))
{
alts = new BitSet();
configToAlts[c] = alts;
}
alts.Set(c.alt);
}
return configToAlts.Values;
}
/// <summary>Get a map from state to alt subset from a configuration set.</summary>
/// <remarks>
/// Get a map from state to alt subset from a configuration set. For each
/// configuration
/// <c>c</c>
/// in
/// <paramref name="configs"/>
/// :
/// <pre>
/// map[c.
/// <see cref="ATNConfig.state"/>
/// ] U= c.
/// <see cref="ATNConfig.alt"/>
/// </pre>
/// </remarks>
[return: NotNull]
public static IDictionary<ATNState, BitSet> GetStateToAltMap(IEnumerable<ATNConfig> configs)
{
IDictionary<ATNState, BitSet> m = new Dictionary<ATNState, BitSet>();
foreach (ATNConfig c in configs)
{
BitSet alts;
if (!m.TryGetValue(c.state, out alts))
{
alts = new BitSet();
m[c.state] = alts;
}
alts.Set(c.alt);
}
return m;
}
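/// <summary>
/// Returns <see langword="true"/> if any ATN state in <paramref name="configs"/> is associated
/// with exactly one alternative.
/// </summary>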
public static bool HasStateAssociatedWithOneAlt(IEnumerable<ATNConfig> configs)
{
IDictionary<ATNState, BitSet> x = GetStateToAltMap(configs);
foreach (BitSet alts in x.Values)
{
if (alts.Cardinality() == 1)
{
return true;
}
}
return false;
}
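/// <summary>
/// Returns the single viable alternative when every subset in <paramref name="altsets"/> resolves
/// to the same minimum alternative, otherwise <see cref="ATN.INVALID_ALT_NUMBER"/>.
/// </summary>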
public static int GetSingleViableAlt(IEnumerable<BitSet> altsets)
{
BitSet viableAlts = new BitSet();
foreach (BitSet alts in altsets)
{
int minAlt = alts.NextSetBit(0);
viableAlts.Set(minAlt);
if (viableAlts.Cardinality() > 1)
{
// more than 1 viable alt
return ATN.INVALID_ALT_NUMBER;
}
}
return viableAlts.NextSetBit(0);
}
}
}


@ -0,0 +1,245 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/**
* @since 4.3
*/
internal class ProfilingATNSimulator : ParserATNSimulator
{
protected readonly DecisionInfo[] decisions;
protected int numDecisions;
protected int sllStopIndex;
protected int llStopIndex;
protected int currentDecision;
protected DFAState currentState;
/** At the point of LL failover, we record how SLL would resolve the conflict so that
* we can determine whether or not a decision / input pair is context-sensitive.
* If LL gives a different result than SLL's predicted alternative, we have a
* context sensitivity for sure. The converse is not necessarily true, however.
* It's possible that after conflict resolution chooses minimum alternatives,
* SLL could get the same answer as LL. Regardless of whether or not the result indicates
* an ambiguity, it is not treated as a context sensitivity because LL prediction
* was not required in order to produce a correct prediction for this decision and input sequence.
* It may in fact still be a context sensitivity but we don't know by looking at the
* minimum alternatives for the current input.
*/
protected int conflictingAltResolvedBySLL;
public ProfilingATNSimulator(Parser parser)
: base(parser,
parser.Interpreter.atn,
parser.Interpreter.decisionToDFA,
parser.Interpreter.getSharedContextCache())
{
numDecisions = atn.decisionToState.Count;
decisions = new DecisionInfo[numDecisions];
for (int i = 0; i < numDecisions; i++)
{
decisions[i] = new DecisionInfo(i);
}
}
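/** Wraps the base prediction call to record per-decision timing, invocation counts,
 * and minimum/maximum SLL and LL lookahead depths.
 */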
public override int AdaptivePredict(ITokenStream input, int decision, ParserRuleContext outerContext)
{
try
{
this.sllStopIndex = -1;
this.llStopIndex = -1;
this.currentDecision = decision;
long start = DateTime.Now.ToFileTime(); // expensive but useful info
int alt = base.AdaptivePredict(input, decision, outerContext);
long stop = DateTime.Now.ToFileTime();
decisions[decision].timeInPrediction += (stop - start);
decisions[decision].invocations++;
int SLL_k = sllStopIndex - startIndex + 1;
decisions[decision].SLL_TotalLook += SLL_k;
decisions[decision].SLL_MinLook = decisions[decision].SLL_MinLook == 0 ? SLL_k : Math.Min(decisions[decision].SLL_MinLook, SLL_k);
if (SLL_k > decisions[decision].SLL_MaxLook)
{
decisions[decision].SLL_MaxLook = SLL_k;
decisions[decision].SLL_MaxLookEvent =
new LookaheadEventInfo(decision, null/*, alt*/, input, startIndex, sllStopIndex, false);
}
if (llStopIndex >= 0)
{
int LL_k = llStopIndex - startIndex + 1;
decisions[decision].LL_TotalLook += LL_k;
decisions[decision].LL_MinLook = decisions[decision].LL_MinLook == 0 ? LL_k : Math.Min(decisions[decision].LL_MinLook, LL_k);
if (LL_k > decisions[decision].LL_MaxLook)
{
decisions[decision].LL_MaxLook = LL_k;
decisions[decision].LL_MaxLookEvent =
new LookaheadEventInfo(decision, null/*, alt*/, input, startIndex, llStopIndex, true);
}
}
return alt;
}
finally
{
this.currentDecision = -1;
}
}
protected override DFAState GetExistingTargetState(DFAState previousD, int t)
{
// this method is called after each time the input position advances
// during SLL prediction
sllStopIndex = input.Index;
DFAState existingTargetState = base.GetExistingTargetState(previousD, t);
if (existingTargetState != null)
{
decisions[currentDecision].SLL_DFATransitions++; // count only if we transition over a DFA state
if (existingTargetState == ERROR)
{
decisions[currentDecision].errors.Add(
new ErrorInfo(currentDecision, null /*previousD.configs*/, input, startIndex, sllStopIndex)
);
}
}
currentState = existingTargetState;
return existingTargetState;
}
protected override DFAState ComputeTargetState(DFA dfa, DFAState previousD, int t)
{
DFAState state = base.ComputeTargetState(dfa, previousD, t);
currentState = state;
return state;
}
protected override ATNConfigSet ComputeReachSet(ATNConfigSet closure, int t, bool fullCtx)
{
if (fullCtx)
{
// this method is called after each time the input position advances
// during full context prediction
llStopIndex = input.Index;
}
ATNConfigSet reachConfigs = base.ComputeReachSet(closure, t, fullCtx);
if (fullCtx)
{
decisions[currentDecision].LL_ATNTransitions++; // count computation even if error
if (reachConfigs != null)
{
}
else { // no reach on current lookahead symbol. ERROR.
// TODO: does not handle delayed errors per getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule()
decisions[currentDecision].errors.Add(
new ErrorInfo(currentDecision, null /*closure*/, input, startIndex, llStopIndex)
);
}
}
else {
decisions[currentDecision].SLL_ATNTransitions++;
if (reachConfigs != null)
{
}
else { // no reach on current lookahead symbol. ERROR.
decisions[currentDecision].errors.Add(
new ErrorInfo(currentDecision, null /*closure*/, input, startIndex, sllStopIndex)
);
}
}
return reachConfigs;
}
protected override bool EvalSemanticContext(SemanticContext pred, ParserRuleContext parserCallStack, int alt, bool fullCtx)
{
bool result = base.EvalSemanticContext(pred, parserCallStack, alt, fullCtx);
if (!(pred is SemanticContext.PrecedencePredicate)) {
bool fullContext = llStopIndex >= 0;
int stopIndex = fullContext ? llStopIndex : sllStopIndex;
decisions[currentDecision].predicateEvals.Add(
new PredicateEvalInfo(null , currentDecision, input, startIndex, stopIndex, pred, result, alt/*, fullCtx*/)
);
}
return result;
}
protected override void ReportAttemptingFullContext(DFA dfa, BitSet conflictingAlts, ATNConfigSet configs, int startIndex, int stopIndex)
{
if (conflictingAlts != null)
{
conflictingAltResolvedBySLL = conflictingAlts.NextSetBit(0);
}
else {
conflictingAltResolvedBySLL = configs.GetAlts().NextSetBit(0);
}
decisions[currentDecision].LL_Fallback++;
base.ReportAttemptingFullContext(dfa, conflictingAlts, configs, startIndex, stopIndex);
}
protected override void ReportContextSensitivity(DFA dfa, int prediction, ATNConfigSet configs, int startIndex, int stopIndex)
{
if (prediction != conflictingAltResolvedBySLL)
{
decisions[currentDecision].contextSensitivities.Add(
new ContextSensitivityInfo(currentDecision, null /*configs*/, input, startIndex, stopIndex)
);
}
base.ReportContextSensitivity(dfa, prediction, configs, startIndex, stopIndex);
}
protected override void ReportAmbiguity(DFA dfa, DFAState D, int startIndex, int stopIndex, bool exact,
BitSet ambigAlts, ATNConfigSet configSet)
{
int prediction;
if (ambigAlts != null)
{
prediction = ambigAlts.NextSetBit(0);
}
else {
prediction = configSet.GetAlts().NextSetBit(0);
}
if (configSet.fullCtx && prediction != conflictingAltResolvedBySLL)
{
// Even though this is an ambiguity we are reporting, we can
// still detect some context sensitivities. Both SLL and LL
// are showing a conflict, hence an ambiguity, but if they resolve
// to different minimum alternatives we have also identified a
// context sensitivity.
decisions[currentDecision].contextSensitivities.Add( new ContextSensitivityInfo(currentDecision, null /*configs*/, input, startIndex, stopIndex) );
}
decisions[currentDecision].ambiguities.Add(
new AmbiguityInfo(currentDecision, null /*configs, ambigAlts*/,
input, startIndex, stopIndex/*, configs.IsFullContext*/)
);
base.ReportAmbiguity(dfa, D, startIndex, stopIndex, exact, ambigAlts, configSet);
}
// ---------------------------------------------------------------------
public DecisionInfo[] getDecisionInfo()
{
return decisions;
}
public DFAState getCurrentState()
{
return currentState;
}
}
}


@ -0,0 +1,54 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal sealed class RangeTransition : Transition
{
public readonly int from;
public readonly int to;
public RangeTransition(ATNState target, int from, int to)
: base(target)
{
this.from = from;
this.to = to;
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.RANGE;
}
}
public override IntervalSet Label
{
get
{
return IntervalSet.Of(from, to);
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return symbol >= from && symbol <= to;
}
[return: NotNull]
public override string ToString()
{
return "'" + (char)from + "'..'" + (char)to + "'";
}
}
}


@ -0,0 +1,27 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal sealed class RuleStartState : ATNState
{
public RuleStopState stopState;
public bool isPrecedenceRule;
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.RuleStart;
}
}
}
}


@ -0,0 +1,38 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>The last node in the ATN for a rule, unless that rule is the start symbol.</summary>
/// <remarks>
/// The last node in the ATN for a rule, unless that rule is the start symbol.
/// In that case, there is one transition to EOF. Later, we might encode
/// references to all calls to this rule to compute FOLLOW sets for
/// error handling.
/// </remarks>
internal sealed class RuleStopState : ATNState
{
public override int NonStopStateNumber
{
get
{
return -1;
}
}
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.RuleStop;
}
}
}
}

Просмотреть файл

@ -0,0 +1,64 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime.Misc;
namespace Antlr4.Runtime.Atn
{
internal sealed class RuleTransition : Transition
{
/// <summary>Ptr to the rule definition object for this rule ref</summary>
public readonly int ruleIndex;
public readonly int precedence;
/// <summary>What node to begin computations following ref to rule</summary>
[NotNull]
public ATNState followState;
public bool tailCall;
public bool optimizedTailCall;
[Obsolete(@"UseRuleTransition(RuleStartState, int, int, ATNState) instead.")]
public RuleTransition(RuleStartState ruleStart, int ruleIndex, ATNState followState)
: this(ruleStart, ruleIndex, 0, followState)
{
}
public RuleTransition(RuleStartState ruleStart, int ruleIndex, int precedence, ATNState followState)
: base(ruleStart)
{
// no Rule object at runtime
this.ruleIndex = ruleIndex;
this.precedence = precedence;
this.followState = followState;
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.RULE;
}
}
public override bool IsEpsilon
{
get
{
return true;
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return false;
}
}
}


@ -0,0 +1,451 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
internal abstract class SemanticContext
{
public static readonly SemanticContext NONE = new SemanticContext.Predicate();
public abstract bool Eval<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
where ATNInterpreter : ATNSimulator;
public virtual SemanticContext EvalPrecedence<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
where ATNInterpreter : ATNSimulator
{
return this;
}
internal class Predicate : SemanticContext
{
public readonly int ruleIndex;
public readonly int predIndex;
public readonly bool isCtxDependent;
protected internal Predicate()
{
// e.g., $i ref in pred
this.ruleIndex = -1;
this.predIndex = -1;
this.isCtxDependent = false;
}
public Predicate(int ruleIndex, int predIndex, bool isCtxDependent)
{
this.ruleIndex = ruleIndex;
this.predIndex = predIndex;
this.isCtxDependent = isCtxDependent;
}
public override bool Eval<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
{
RuleContext localctx = isCtxDependent ? parserCallStack : null;
return parser.Sempred(localctx, ruleIndex, predIndex);
}
public override int GetHashCode()
{
int hashCode = MurmurHash.Initialize();
hashCode = MurmurHash.Update(hashCode, ruleIndex);
hashCode = MurmurHash.Update(hashCode, predIndex);
hashCode = MurmurHash.Update(hashCode, isCtxDependent ? 1 : 0);
hashCode = MurmurHash.Finish(hashCode, 3);
return hashCode;
}
public override bool Equals(object obj)
{
if (!(obj is SemanticContext.Predicate))
{
return false;
}
if (this == obj)
{
return true;
}
SemanticContext.Predicate p = (SemanticContext.Predicate)obj;
return this.ruleIndex == p.ruleIndex && this.predIndex == p.predIndex && this.isCtxDependent == p.isCtxDependent;
}
public override string ToString()
{
return "{" + ruleIndex + ":" + predIndex + "}?";
}
}
internal class PrecedencePredicate : SemanticContext, IComparable<SemanticContext.PrecedencePredicate>
{
public readonly int precedence;
protected internal PrecedencePredicate()
{
this.precedence = 0;
}
public PrecedencePredicate(int precedence)
{
this.precedence = precedence;
}
public override bool Eval<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
{
return parser.Precpred(parserCallStack, precedence);
}
public override SemanticContext EvalPrecedence<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
{
if (parser.Precpred(parserCallStack, precedence))
{
return SemanticContext.NONE;
}
else
{
return null;
}
}
public virtual int CompareTo(SemanticContext.PrecedencePredicate o)
{
return precedence - o.precedence;
}
public override int GetHashCode()
{
int hashCode = 1;
hashCode = 31 * hashCode + precedence;
return hashCode;
}
public override bool Equals(object obj)
{
if (!(obj is SemanticContext.PrecedencePredicate))
{
return false;
}
if (this == obj)
{
return true;
}
SemanticContext.PrecedencePredicate other = (SemanticContext.PrecedencePredicate)obj;
return this.precedence == other.precedence;
}
public override string ToString()
{
// precedence >= _precedenceStack.peek()
return "{" + precedence + ">=prec}?";
}
}
internal abstract class Operator : SemanticContext
{
[NotNull]
public abstract ICollection<SemanticContext> Operands
{
get;
}
}
internal class AND : SemanticContext.Operator
{
[NotNull]
public readonly SemanticContext[] opnds;
public AND(SemanticContext a, SemanticContext b)
{
HashSet<SemanticContext> operands = new HashSet<SemanticContext>();
if (a is SemanticContext.AND)
{
operands.UnionWith(((AND)a).opnds);
}
else
{
operands.Add(a);
}
if (b is SemanticContext.AND)
{
operands.UnionWith(((AND)b).opnds);
}
else
{
operands.Add(b);
}
IList<SemanticContext.PrecedencePredicate> precedencePredicates = FilterPrecedencePredicates(operands);
if (precedencePredicates.Count > 0)
{
// interested in the transition with the lowest precedence
SemanticContext.PrecedencePredicate reduced = precedencePredicates.Min();
operands.Add(reduced);
}
opnds = operands.ToArray();
}
public override ICollection<SemanticContext> Operands
{
get
{
return Arrays.AsList(opnds);
}
}
public override bool Equals(object obj)
{
if (this == obj)
{
return true;
}
if (!(obj is SemanticContext.AND))
{
return false;
}
SemanticContext.AND other = (SemanticContext.AND)obj;
return Arrays.Equals(this.opnds, other.opnds);
}
public override int GetHashCode()
{
return MurmurHash.HashCode(opnds, typeof(SemanticContext.AND).GetHashCode());
}
public override bool Eval<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
{
foreach (SemanticContext opnd in opnds)
{
if (!opnd.Eval(parser, parserCallStack))
{
return false;
}
}
return true;
}
public override SemanticContext EvalPrecedence<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
{
bool differs = false;
IList<SemanticContext> operands = new List<SemanticContext>();
foreach (SemanticContext context in opnds)
{
SemanticContext evaluated = context.EvalPrecedence(parser, parserCallStack);
differs |= (evaluated != context);
if (evaluated == null)
{
// The AND context is false if any element is false
return null;
}
else
{
if (evaluated != NONE)
{
// Reduce the result by skipping true elements
operands.Add(evaluated);
}
}
}
if (!differs)
{
return this;
}
if (operands.Count == 0)
{
// all elements were true, so the AND context is true
return NONE;
}
SemanticContext result = operands[0];
for (int i = 1; i < operands.Count; i++)
{
result = SemanticContext.AndOp(result, operands[i]);
}
return result;
}
public override string ToString()
{
return Utils.Join("&&", opnds);
}
}
internal class OR : SemanticContext.Operator
{
[NotNull]
public readonly SemanticContext[] opnds;
public OR(SemanticContext a, SemanticContext b)
{
HashSet<SemanticContext> operands = new HashSet<SemanticContext>();
if (a is SemanticContext.OR)
{
operands.UnionWith(((OR)a).opnds);
}
else
{
operands.Add(a);
}
if (b is SemanticContext.OR)
{
operands.UnionWith(((OR)b).opnds);
}
else
{
operands.Add(b);
}
IList<SemanticContext.PrecedencePredicate> precedencePredicates = FilterPrecedencePredicates(operands);
if (precedencePredicates.Count > 0)
{
// interested in the transition with the highest precedence
SemanticContext.PrecedencePredicate reduced = precedencePredicates.Max();
operands.Add(reduced);
}
this.opnds = operands.ToArray();
}
public override ICollection<SemanticContext> Operands
{
get
{
return Arrays.AsList(opnds);
}
}
public override bool Equals(object obj)
{
if (this == obj)
{
return true;
}
if (!(obj is SemanticContext.OR))
{
return false;
}
SemanticContext.OR other = (SemanticContext.OR)obj;
return Arrays.Equals(this.opnds, other.opnds);
}
public override int GetHashCode()
{
return MurmurHash.HashCode(opnds, typeof(SemanticContext.OR).GetHashCode());
}
public override bool Eval<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
{
foreach (SemanticContext opnd in opnds)
{
if (opnd.Eval(parser, parserCallStack))
{
return true;
}
}
return false;
}
public override SemanticContext EvalPrecedence<Symbol, ATNInterpreter>(Recognizer<Symbol, ATNInterpreter> parser, RuleContext parserCallStack)
{
bool differs = false;
IList<SemanticContext> operands = new List<SemanticContext>();
foreach (SemanticContext context in opnds)
{
SemanticContext evaluated = context.EvalPrecedence(parser, parserCallStack);
differs |= (evaluated != context);
if (evaluated == NONE)
{
// The OR context is true if any element is true
return NONE;
}
else
{
if (evaluated != null)
{
// Reduce the result by skipping false elements
operands.Add(evaluated);
}
}
}
if (!differs)
{
return this;
}
if (operands.Count == 0)
{
// all elements were false, so the OR context is false
return null;
}
SemanticContext result = operands[0];
for (int i = 1; i < operands.Count; i++)
{
result = SemanticContext.OrOp(result, operands[i]);
}
return result;
}
public override string ToString()
{
return Utils.Join("||", opnds);
}
}
public static SemanticContext AndOp(SemanticContext a, SemanticContext b)
{
if (a == null || a == NONE)
{
return b;
}
if (b == null || b == NONE)
{
return a;
}
SemanticContext.AND result = new SemanticContext.AND(a, b);
if (result.opnds.Length == 1)
{
return result.opnds[0];
}
return result;
}
public static SemanticContext OrOp(SemanticContext a, SemanticContext b)
{
if (a == null)
{
return b;
}
if (b == null)
{
return a;
}
if (a == NONE || b == NONE)
{
return NONE;
}
SemanticContext.OR result = new SemanticContext.OR(a, b);
if (result.opnds.Length == 1)
{
return result.opnds[0];
}
return result;
}
private static IList<SemanticContext.PrecedencePredicate> FilterPrecedencePredicates(HashSet<SemanticContext> collection)
{
if (!collection.OfType<PrecedencePredicate>().Any())
return Collections.EmptyList<PrecedencePredicate>();
List<PrecedencePredicate> result = collection.OfType<PrecedencePredicate>().ToList();
#if NET40PLUS
collection.ExceptWith(result);
#else
collection.ExceptWith(result.Cast<SemanticContext>());
#endif
return result;
}
}
}
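
A quick way to read the AndOp/OrOp reductions above: NONE (the semantic context that is always true) acts as the identity for AND and as an absorbing element for OR, and any combination that ends up with a single operand returns that operand itself. A minimal sketch of those identities, assuming p and q are previously built SemanticContext predicates (placeholder names, not part of this change):

    using Antlr4.Runtime.Atn;

    internal static class SemanticContextExamples
    {
        // p and q are assumed to be previously constructed SemanticContext predicates.
        internal static SemanticContext Demo(SemanticContext p, SemanticContext q)
        {
            SemanticContext a = SemanticContext.AndOp(SemanticContext.NONE, p); // NONE is the AND identity: yields p
            SemanticContext o = SemanticContext.OrOp(p, SemanticContext.NONE);  // NONE absorbs under OR: yields NONE
            return SemanticContext.AndOp(a, SemanticContext.OrOp(o, q));        // AndOp/OrOp keep reducing trivial operands as they compose
        }
    }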


@ -0,0 +1,59 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>A transition containing a set of values.</summary>
/// <remarks>A transition containing a set of values.</remarks>
internal class SetTransition : Transition
{
[NotNull]
public readonly IntervalSet set;
public SetTransition(ATNState target, IntervalSet set)
: base(target)
{
// TODO (sam): should we really allow null here?
if (set == null)
{
set = IntervalSet.Of(TokenConstants.InvalidType);
}
this.set = set;
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.SET;
}
}
public override IntervalSet Label
{
get
{
return set;
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return set.Contains(symbol);
}
[return: NotNull]
public override string ToString()
{
return set.ToString();
}
}
}


@ -0,0 +1,33 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <author>Sam Harwell</author>
internal class SimulatorState
{
public readonly ParserRuleContext outerContext;
public readonly DFAState s0;
public readonly bool useContext;
public readonly ParserRuleContext remainingOuterContext;
public SimulatorState(ParserRuleContext outerContext, DFAState s0, bool useContext, ParserRuleContext remainingOuterContext)
{
this.outerContext = outerContext != null ? outerContext : ParserRuleContext.EmptyContext;
this.s0 = s0;
this.useContext = useContext;
this.remainingOuterContext = remainingOuterContext;
}
}
}


@ -0,0 +1,102 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Misc;
namespace Antlr4.Runtime.Atn
{
#pragma warning disable 0659 // 'class' overrides Object.Equals(object o) but does not override Object.GetHashCode()
internal class SingletonPredictionContext : PredictionContext
{
public static PredictionContext Create(PredictionContext parent, int returnState)
{
if (returnState == EMPTY_RETURN_STATE && parent == null)
{
// someone can pass in the bits of an array ctx that mean $
return PredictionContext.EMPTY;
}
return new SingletonPredictionContext(parent, returnState);
}
[NotNull]
public readonly PredictionContext parent;
public readonly int returnState;
internal SingletonPredictionContext(PredictionContext parent, int returnState)
: base(CalculateHashCode(parent, returnState))
{
this.parent = parent;
this.returnState = returnState;
}
public override PredictionContext GetParent(int index)
{
System.Diagnostics.Debug.Assert(index == 0);
return parent;
}
public override int GetReturnState(int index)
{
System.Diagnostics.Debug.Assert(index == 0);
return returnState;
}
public override int Size
{
get
{
return 1;
}
}
public override bool IsEmpty
{
get
{
return false;
}
}
public override bool Equals(object o)
{
if (o == this)
{
return true;
}
else
{
if (!(o is Antlr4.Runtime.Atn.SingletonPredictionContext))
{
return false;
}
}
if (this.GetHashCode() != o.GetHashCode())
{
return false;
}
Antlr4.Runtime.Atn.SingletonPredictionContext other = (Antlr4.Runtime.Atn.SingletonPredictionContext)o;
return returnState == other.returnState && parent.Equals(other.parent);
}
public override string ToString()
{
string up = parent != null ? parent.ToString() : "";
if (up.Length == 0)
{
if (returnState == EMPTY_RETURN_STATE)
{
return "$";
}
return returnState.ToString();
}
return returnState.ToString() + " " + up;
}
}
}


@ -0,0 +1,25 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>The block that begins a closure loop.</summary>
/// <remarks>The block that begins a closure loop.</remarks>
internal sealed class StarBlockStartState : BlockStartState
{
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.StarBlockStart;
}
}
}
}


@ -0,0 +1,40 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
namespace Antlr4.Runtime.Atn
{
internal sealed class StarLoopEntryState : DecisionState
{
public StarLoopbackState loopBackState;
/// <summary>
/// Indicates whether this state can benefit from a precedence DFA during SLL
/// decision making.
/// </summary>
/// <remarks>
/// Indicates whether this state can benefit from a precedence DFA during SLL
/// decision making.
/// <p>This is a computed property that is calculated during ATN deserialization
/// and stored for use in
/// <see cref="ParserATNSimulator"/>
/// and
/// <see cref="Antlr4.Runtime.ParserInterpreter"/>
/// .</p>
/// </remarks>
/// <seealso cref="Antlr4.Runtime.Dfa.DFA.IsPrecedenceDfa()"/>
public bool isPrecedenceDecision;
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.StarLoopEntry;
}
}
}
}


@ -0,0 +1,29 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
namespace Antlr4.Runtime.Atn
{
internal sealed class StarLoopbackState : ATNState
{
public StarLoopEntryState LoopEntryState
{
get
{
return (StarLoopEntryState)Transition(0).target;
}
}
public override Antlr4.Runtime.Atn.StateType StateType
{
get
{
return Antlr4.Runtime.Atn.StateType.StarLoopBack;
}
}
}
}


@ -0,0 +1,27 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
namespace Antlr4.Runtime.Atn
{
internal enum StateType
{
InvalidType,
Basic,
RuleStart,
BlockStart,
PlusBlockStart,
StarBlockStart,
TokenStart,
RuleStop,
BlockEnd,
StarLoopBack,
StarLoopEntry,
PlusLoopBack,
LoopEnd
}
}


@ -0,0 +1,22 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
namespace Antlr4.Runtime.Atn
{
/// <summary>The Tokens rule start state linking to each lexer rule start state</summary>
internal sealed class TokensStartState : DecisionState
{
public override StateType StateType
{
get
{
return StateType.TokenStart;
}
}
}
}


@ -0,0 +1,84 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.ObjectModel;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Atn
{
/// <summary>An ATN transition between any two ATN states.</summary>
/// <remarks>
/// An ATN transition between any two ATN states. Subclasses define
/// atom, set, epsilon, action, predicate, rule transitions.
/// <p>This is a one way link. It emanates from a state (usually via a list of
/// transitions) and has a target state.</p>
/// <p>Since we never have to change the ATN transitions once we construct it,
/// we can fix these transitions as specific classes. The DFA transitions
/// on the other hand need to update the labels as it adds transitions to
/// the states. We'll use the term Edge for the DFA to distinguish them from
/// ATN transitions.</p>
/// </remarks>
internal abstract class Transition
{
public static readonly ReadOnlyCollection<string> serializationNames = new ReadOnlyCollection<string>(Arrays.AsList("INVALID", "EPSILON", "RANGE", "RULE", "PREDICATE", "ATOM", "ACTION", "SET", "NOT_SET", "WILDCARD", "PRECEDENCE"));
/// <summary>The target of this transition.</summary>
/// <remarks>The target of this transition.</remarks>
[NotNull]
public ATNState target;
protected internal Transition(ATNState target)
{
if (target == null)
{
throw new ArgumentNullException("target cannot be null.");
}
this.target = target;
}
public abstract TransitionType TransitionType
{
get;
}
/// <summary>Determines if the transition is an "epsilon" transition.</summary>
/// <remarks>
/// Determines if the transition is an "epsilon" transition.
/// <p>The default implementation returns
/// <see langword="false"/>
/// .</p>
/// </remarks>
/// <returns>
///
/// <see langword="true"/>
/// if traversing this transition in the ATN does not
/// consume an input symbol; otherwise,
/// <see langword="false"/>
/// if traversing this
/// transition consumes (matches) an input symbol.
/// </returns>
public virtual bool IsEpsilon
{
get
{
return false;
}
}
public virtual IntervalSet Label
{
get
{
return null;
}
}
public abstract bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol);
}
}
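
Since every concrete transition answers IsEpsilon, TransitionType, Label, and Matches, edges can be classified generically without downcasting. A small sketch; the Transition argument is assumed to come from ATNState.Transition(i) on an already-loaded ATN:

    using Antlr4.Runtime.Atn;
    using Antlr4.Runtime.Misc;

    internal static class TransitionExamples
    {
        // t is assumed to be obtained from ATNState.Transition(i) elsewhere.
        internal static string Describe(Transition t)
        {
            if (t.IsEpsilon)
            {
                // Epsilon-style edges (rule, predicate, action, ...) consume no input symbol.
                return "epsilon edge of kind " + t.TransitionType;
            }
            IntervalSet label = t.Label; // null when the edge has no explicit label (e.g. wildcard)
            return label != null
                ? t.TransitionType + " over " + label
                : t.TransitionType + " (unlabeled)";
        }
    }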


@ -0,0 +1,25 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
namespace Antlr4.Runtime.Atn
{
internal enum TransitionType
{
INVALID,
EPSILON,
RANGE,
RULE,
PREDICATE,
ATOM,
ACTION,
SET,
NOT_SET,
WILDCARD,
PRECEDENCE
}
}


@ -0,0 +1,38 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Misc;
namespace Antlr4.Runtime.Atn
{
internal sealed class WildcardTransition : Transition
{
public WildcardTransition(ATNState target)
: base(target)
{
}
public override Antlr4.Runtime.Atn.TransitionType TransitionType
{
get
{
return Antlr4.Runtime.Atn.TransitionType.WILDCARD;
}
}
public override bool Matches(int symbol, int minVocabSymbol, int maxVocabSymbol)
{
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol;
}
[return: NotNull]
public override string ToString()
{
return ".";
}
}
}


@ -0,0 +1,97 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <summary>
/// This implementation of
/// <see cref="IAntlrErrorStrategy"/>
/// responds to syntax errors
/// by immediately canceling the parse operation with a
/// <see cref="ParseCanceledException"/>
/// . The implementation ensures that the
/// <see cref="ParserRuleContext.exception"/>
/// field is set for all parse tree nodes
/// that were not completed prior to encountering the error.
/// <p>
/// This error strategy is useful in the following scenarios.</p>
/// <ul>
/// <li><strong>Two-stage parsing:</strong> This error strategy allows the first
/// stage of two-stage parsing to immediately terminate if an error is
/// encountered, and immediately fall back to the second stage. In addition to
/// avoiding wasted work by attempting to recover from errors here, the empty
/// implementation of
/// <see cref="Sync(Parser)"/>
/// improves the performance of
/// the first stage.</li>
/// <li><strong>Silent validation:</strong> When syntax errors are not being
/// reported or logged, and the parse result is simply ignored if errors occur,
/// the
/// <see cref="BailErrorStrategy"/>
/// avoids wasting work on recovering from errors
/// when the result will be ignored either way.</li>
/// </ul>
/// <p>
/// <c>myparser.ErrorHandler = new BailErrorStrategy();</c>
/// </p>
/// </summary>
/// <seealso cref="Parser.ErrorHandler"/>
internal class BailErrorStrategy : DefaultErrorStrategy
{
/// <summary>
/// Instead of recovering from exception
/// <paramref name="e"/>
/// , re-throw it wrapped
/// in a
/// <see cref="ParseCanceledException"/>
/// so it is not caught by the
/// rule function catches. Use
/// <see cref="System.Exception.InnerException()"/>
/// to get the
/// original
/// <see cref="RecognitionException"/>
/// .
/// </summary>
public override void Recover(Parser recognizer, RecognitionException e)
{
for (ParserRuleContext context = recognizer.Context; context != null; context = ((ParserRuleContext)context.Parent))
{
context.exception = e;
}
throw new ParseCanceledException(e);
}
/// <summary>
/// Make sure we don't attempt to recover inline; if the parser
/// successfully recovers, it won't throw an exception.
/// </summary>
/// <remarks>
/// Make sure we don't attempt to recover inline; if the parser
/// successfully recovers, it won't throw an exception.
/// </remarks>
/// <exception cref="Antlr4.Runtime.RecognitionException"/>
public override IToken RecoverInline(Parser recognizer)
{
InputMismatchException e = new InputMismatchException(recognizer);
for (ParserRuleContext context = recognizer.Context; context != null; context = ((ParserRuleContext)context.Parent))
{
context.exception = e;
}
throw new ParseCanceledException(e);
}
/// <summary>Make sure we don't attempt to recover from problems in subrules.</summary>
/// <remarks>Make sure we don't attempt to recover from problems in subrules.</remarks>
public override void Sync(Parser recognizer)
{
}
}
}
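
The two-stage pattern described above looks roughly like the sketch below. MyGrammarLexer, MyGrammarParser, and compilationUnit are hypothetical generated names; BailErrorStrategy, DefaultErrorStrategy, ParseCanceledException, CharStreams, and CommonTokenStream are the types added in this change, and Parser.ErrorHandler is the settable strategy property referenced above.

    using Antlr4.Runtime;
    using Antlr4.Runtime.Misc;

    internal static class TwoStageParseExample
    {
        // First pass bails on the first syntax error; the retry uses full error recovery.
        internal static ParserRuleContext ParseWithFallback(string text)
        {
            var tokens = new CommonTokenStream(new MyGrammarLexer(CharStreams.fromstring(text)));
            var parser = new MyGrammarParser(tokens) { ErrorHandler = new BailErrorStrategy() };
            try
            {
                return parser.compilationUnit();
            }
            catch (ParseCanceledException)
            {
                tokens.Reset(); // rewind the already-buffered tokens
                parser = new MyGrammarParser(tokens) { ErrorHandler = new DefaultErrorStrategy() };
                return parser.compilationUnit();
            }
        }
    }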


@ -0,0 +1,42 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Sharpen;
using System.IO;
namespace Antlr4.Runtime
{
/// <summary>
/// Provides an empty default implementation of
/// <see cref="IAntlrErrorListener{Symbol}"/>
/// . The
/// default implementation of each method does nothing, but can be overridden as
/// necessary.
/// </summary>
/// <author>Sam Harwell</author>
internal class BaseErrorListener : IParserErrorListener
{
public virtual void SyntaxError(TextWriter output, IRecognizer recognizer, IToken offendingSymbol, int line, int charPositionInLine, string msg, RecognitionException e)
{
}
public virtual void ReportAmbiguity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, bool exact, BitSet ambigAlts, ATNConfigSet configs)
{
}
public virtual void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, SimulatorState conflictState)
{
}
public virtual void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, SimulatorState acceptState)
{
}
}
}
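
A typical use is to derive from it and override only SyntaxError, for example to collect diagnostics instead of printing them. A sketch; the AddErrorListener registration shown in the trailing comment is the usual Recognizer API and is assumed to be available on the (hypothetical) generated parser:

    using System.Collections.Generic;
    using System.IO;
    using Antlr4.Runtime;

    // Collects syntax errors instead of printing them; the other three callbacks keep
    // the empty BaseErrorListener behavior.
    internal class CollectingErrorListener : BaseErrorListener
    {
        public readonly List<string> Errors = new List<string>();

        public override void SyntaxError(TextWriter output, IRecognizer recognizer, IToken offendingSymbol,
            int line, int charPositionInLine, string msg, RecognitionException e)
        {
            Errors.Add("line " + line + ":" + charPositionInLine + " " + msg);
        }
    }

    // Usage on a hypothetical generated parser:
    //   var errors = new CollectingErrorListener();
    //   parser.AddErrorListener(errors);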


@ -0,0 +1,704 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Text;
using Antlr4.Runtime;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <summary>
/// This implementation of
/// <see cref="ITokenStream"/>
/// loads tokens from a
/// <see cref="ITokenSource"/>
/// on-demand, and places the tokens in a buffer to provide
/// access to any previous token by index.
/// <p>
/// This token stream ignores the value of
/// <see cref="IToken.Channel()"/>
/// . If your
/// parser requires the token stream to filter tokens to only those on a particular
/// channel, such as
/// <see cref="TokenConstants.DefaultChannel"/>
/// or
/// <see cref="TokenConstants.HiddenChannel"/>
/// , use a filtering token stream such as
/// <see cref="CommonTokenStream"/>
/// .</p>
/// </summary>
internal class BufferedTokenStream : ITokenStream
{
/// <summary>
/// The
/// <see cref="ITokenSource"/>
/// from which tokens for this stream are fetched.
/// </summary>
[NotNull]
private ITokenSource _tokenSource;
/// <summary>A collection of all tokens fetched from the token source.</summary>
/// <remarks>
/// A collection of all tokens fetched from the token source. The list is
/// considered a complete view of the input once
/// <see cref="fetchedEOF"/>
/// is set
/// to
/// <see langword="true"/>
/// .
/// </remarks>
protected internal IList<IToken> tokens = new List<IToken>(100);
/// <summary>
/// The index into
/// <see cref="tokens"/>
/// of the current token (next token to
/// <see cref="Consume()"/>
/// ).
/// <see cref="tokens"/>
/// <c>[</c>
/// <see cref="p"/>
/// <c>]</c>
/// should be
/// <see cref="LT(int)">LT(1)</see>
/// .
/// <p>This field is set to -1 when the stream is first constructed or when
/// <see cref="SetTokenSource(ITokenSource)"/>
/// is called, indicating that the first token has
/// not yet been fetched from the token source. For additional information,
/// see the documentation of
/// <see cref="IIntStream"/>
/// for a description of
/// Initializing Methods.</p>
/// </summary>
protected internal int p = -1;
/// <summary>
/// Indicates whether the
/// <see cref="TokenConstants.EOF"/>
/// token has been fetched from
/// <see cref="_tokenSource"/>
/// and added to
/// <see cref="tokens"/>
/// . This field improves
/// performance for the following cases:
/// <ul>
/// <li>
/// <see cref="Consume()"/>
/// : The lookahead check in
/// <see cref="Consume()"/>
/// to prevent
/// consuming the EOF symbol is optimized by checking the values of
/// <see cref="fetchedEOF"/>
/// and
/// <see cref="p"/>
/// instead of calling
/// <see cref="LA(int)"/>
/// .</li>
/// <li>
/// <see cref="Fetch(int)"/>
/// : The check to prevent adding multiple EOF symbols into
/// <see cref="tokens"/>
/// is trivial with this field.</li>
/// </ul>
/// </summary>
protected internal bool fetchedEOF;
public BufferedTokenStream(ITokenSource tokenSource)
{
if (tokenSource == null)
{
throw new ArgumentNullException("tokenSource cannot be null");
}
this._tokenSource = tokenSource;
}
public virtual ITokenSource TokenSource
{
get
{
return _tokenSource;
}
}
public virtual int Index
{
get
{
return p;
}
}
public virtual int Mark()
{
return 0;
}
public virtual void Release(int marker)
{
}
// no resources to release
public virtual void Reset()
{
Seek(0);
}
public virtual void Seek(int index)
{
LazyInit();
p = AdjustSeekIndex(index);
}
public virtual int Size
{
get
{
return tokens.Count;
}
}
public virtual void Consume()
{
bool skipEofCheck;
if (p >= 0)
{
if (fetchedEOF)
{
// the last token in tokens is EOF. skip check if p indexes any
// fetched token except the last.
skipEofCheck = p < tokens.Count - 1;
}
else
{
// no EOF token in tokens. skip check if p indexes a fetched token.
skipEofCheck = p < tokens.Count;
}
}
else
{
// not yet initialized
skipEofCheck = false;
}
if (!skipEofCheck && LA(1) == IntStreamConstants.EOF)
{
throw new InvalidOperationException("cannot consume EOF");
}
if (Sync(p + 1))
{
p = AdjustSeekIndex(p + 1);
}
}
/// <summary>
/// Make sure index
/// <paramref name="i"/>
/// in tokens has a token.
/// </summary>
/// <returns>
///
/// <see langword="true"/>
/// if a token is located at index
/// <paramref name="i"/>
/// , otherwise
/// <see langword="false"/>
/// .
/// </returns>
/// <seealso cref="Get(int)"/>
protected internal virtual bool Sync(int i)
{
System.Diagnostics.Debug.Assert(i >= 0);
int n = i - tokens.Count + 1;
// how many more elements we need?
//System.out.println("sync("+i+") needs "+n);
if (n > 0)
{
int fetched = Fetch(n);
return fetched >= n;
}
return true;
}
/// <summary>
/// Add
/// <paramref name="n"/>
/// elements to buffer.
/// </summary>
/// <returns>The actual number of elements added to the buffer.</returns>
protected internal virtual int Fetch(int n)
{
if (fetchedEOF)
{
return 0;
}
for (int i = 0; i < n; i++)
{
IToken t = _tokenSource.NextToken();
if (t is IWritableToken)
{
((IWritableToken)t).TokenIndex = tokens.Count;
}
tokens.Add(t);
if (t.Type == TokenConstants.EOF)
{
fetchedEOF = true;
return i + 1;
}
}
return n;
}
public virtual IToken Get(int i)
{
if (i < 0 || i >= tokens.Count)
{
throw new ArgumentOutOfRangeException("token index " + i + " out of range 0.." + (tokens.Count - 1));
}
return tokens[i];
}
/// <summary>Get all tokens from start..stop inclusively.</summary>
/// <remarks>Get all tokens from start..stop inclusively.</remarks>
public virtual IList<IToken> Get(int start, int stop)
{
if (start < 0 || stop < 0)
{
return null;
}
LazyInit();
IList<IToken> subset = new List<IToken>();
if (stop >= tokens.Count)
{
stop = tokens.Count - 1;
}
for (int i = start; i <= stop; i++)
{
IToken t = tokens[i];
if (t.Type == TokenConstants.EOF)
{
break;
}
subset.Add(t);
}
return subset;
}
public virtual int LA(int i)
{
return LT(i).Type;
}
protected internal virtual IToken Lb(int k)
{
if ((p - k) < 0)
{
return null;
}
return tokens[p - k];
}
[return: NotNull]
public virtual IToken LT(int k)
{
LazyInit();
if (k == 0)
{
return null;
}
if (k < 0)
{
return Lb(-k);
}
int i = p + k - 1;
Sync(i);
if (i >= tokens.Count)
{
// return EOF token
// EOF must be last token
return tokens[tokens.Count - 1];
}
// if ( i>range ) range = i;
return tokens[i];
}
/// <summary>
/// Allows derived classes to modify the behavior of operations which change
/// the current stream position by adjusting the target token index of a seek
/// operation.
/// </summary>
/// <remarks>
/// Allows derived classes to modify the behavior of operations which change
/// the current stream position by adjusting the target token index of a seek
/// operation. The default implementation simply returns
/// <paramref name="i"/>
/// . If an
/// exception is thrown in this method, the current stream index should not be
/// changed.
/// <p>For example,
/// <see cref="CommonTokenStream"/>
/// overrides this method to ensure that
/// the seek target is always an on-channel token.</p>
/// </remarks>
/// <param name="i">The target token index.</param>
/// <returns>The adjusted target token index.</returns>
protected internal virtual int AdjustSeekIndex(int i)
{
return i;
}
protected internal void LazyInit()
{
if (p == -1)
{
Setup();
}
}
protected internal virtual void Setup()
{
Sync(0);
p = AdjustSeekIndex(0);
}
/// <summary>Reset this token stream by setting its token source.</summary>
/// <remarks>Reset this token stream by setting its token source.</remarks>
public virtual void SetTokenSource(ITokenSource tokenSource)
{
this._tokenSource = tokenSource;
tokens.Clear();
p = -1;
this.fetchedEOF = false;
}
public virtual IList<IToken> GetTokens()
{
return tokens;
}
public virtual IList<IToken> GetTokens(int start, int stop)
{
return GetTokens(start, stop, null);
}
/// <summary>
/// Given a start and stop index, return a
/// <c>List</c>
/// of all tokens in
/// the token type
/// <c>BitSet</c>
/// . Return
/// <see langword="null"/>
/// if no tokens were found. This
/// method looks at both on and off channel tokens.
/// </summary>
public virtual IList<IToken> GetTokens(int start, int stop, BitSet types)
{
LazyInit();
if (start < 0 || stop >= tokens.Count || stop < 0 || start >= tokens.Count)
{
throw new ArgumentOutOfRangeException("start " + start + " or stop " + stop + " not in 0.." + (tokens.Count - 1));
}
if (start > stop)
{
return null;
}
// list = tokens[start:stop]:{T t, t.getType() in types}
IList<IToken> filteredTokens = new List<IToken>();
for (int i = start; i <= stop; i++)
{
IToken t = tokens[i];
if (types == null || types.Get(t.Type))
{
filteredTokens.Add(t);
}
}
if (filteredTokens.Count == 0)
{
filteredTokens = null;
}
return filteredTokens;
}
public virtual IList<IToken> GetTokens(int start, int stop, int ttype)
{
BitSet s = new BitSet(ttype);
s.Set(ttype);
return GetTokens(start, stop, s);
}
/// <summary>Given a starting index, return the index of the next token on channel.</summary>
/// <remarks>
/// Given a starting index, return the index of the next token on channel.
/// Return
/// <paramref name="i"/>
/// if
/// <c>tokens[i]</c>
/// is on channel. Return the index of
/// the EOF token if there are no tokens on channel between
/// <paramref name="i"/>
/// and
/// EOF.
/// </remarks>
protected internal virtual int NextTokenOnChannel(int i, int channel)
{
Sync(i);
if (i >= Size)
{
return Size - 1;
}
IToken token = tokens[i];
while (token.Channel != channel)
{
if (token.Type == TokenConstants.EOF)
{
return i;
}
i++;
Sync(i);
token = tokens[i];
}
return i;
}
/// <summary>
/// Given a starting index, return the index of the previous token on
/// channel.
/// </summary>
/// <remarks>
/// Given a starting index, return the index of the previous token on
/// channel. Return
/// <paramref name="i"/>
/// if
/// <c>tokens[i]</c>
/// is on channel. Return -1
/// if there are no tokens on channel between
/// <paramref name="i"/>
/// and 0.
/// <p>
/// If
/// <paramref name="i"/>
/// specifies an index at or after the EOF token, the EOF token
/// index is returned. This is due to the fact that the EOF token is treated
/// as though it were on every channel.</p>
/// </remarks>
protected internal virtual int PreviousTokenOnChannel(int i, int channel)
{
Sync(i);
if (i >= Size)
{
// the EOF token is on every channel
return Size - 1;
}
while (i >= 0)
{
IToken token = tokens[i];
if (token.Type == TokenConstants.EOF || token.Channel == channel)
{
return i;
}
i--;
}
return i;
}
/// <summary>
/// Collect all tokens on specified channel to the right of
/// the current token up until we see a token on
/// <see cref="Lexer.DefaultTokenChannel"/>
/// or
/// EOF. If
/// <paramref name="channel"/>
/// is
/// <c>-1</c>
/// , find any non default channel token.
/// </summary>
public virtual IList<IToken> GetHiddenTokensToRight(int tokenIndex, int channel)
{
LazyInit();
if (tokenIndex < 0 || tokenIndex >= tokens.Count)
{
throw new ArgumentOutOfRangeException(tokenIndex + " not in 0.." + (tokens.Count - 1));
}
int nextOnChannel = NextTokenOnChannel(tokenIndex + 1, Lexer.DefaultTokenChannel);
int to;
int from = tokenIndex + 1;
// if none onchannel to right, nextOnChannel=-1 so set to = last token
if (nextOnChannel == -1)
{
to = Size - 1;
}
else
{
to = nextOnChannel;
}
return FilterForChannel(from, to, channel);
}
/// <summary>
/// Collect all hidden tokens (any off-default channel) to the right of
/// the current token up until we see a token on
/// <see cref="Lexer.DefaultTokenChannel"/>
/// or EOF.
/// </summary>
public virtual IList<IToken> GetHiddenTokensToRight(int tokenIndex)
{
return GetHiddenTokensToRight(tokenIndex, -1);
}
/// <summary>
/// Collect all tokens on specified channel to the left of
/// the current token up until we see a token on
/// <see cref="Lexer.DefaultTokenChannel"/>
/// .
/// If
/// <paramref name="channel"/>
/// is
/// <c>-1</c>
/// , find any non default channel token.
/// </summary>
public virtual IList<IToken> GetHiddenTokensToLeft(int tokenIndex, int channel)
{
LazyInit();
if (tokenIndex < 0 || tokenIndex >= tokens.Count)
{
throw new ArgumentOutOfRangeException(tokenIndex + " not in 0.." + (tokens.Count - 1));
}
if (tokenIndex == 0)
{
// obviously no tokens can appear before the first token
return null;
}
int prevOnChannel = PreviousTokenOnChannel(tokenIndex - 1, Lexer.DefaultTokenChannel);
if (prevOnChannel == tokenIndex - 1)
{
return null;
}
// if none onchannel to left, prevOnChannel=-1 then from=0
int from = prevOnChannel + 1;
int to = tokenIndex - 1;
return FilterForChannel(from, to, channel);
}
/// <summary>
/// Collect all hidden tokens (any off-default channel) to the left of
/// the current token up until we see a token on
/// <see cref="Lexer.DefaultTokenChannel"/>
/// .
/// </summary>
public virtual IList<IToken> GetHiddenTokensToLeft(int tokenIndex)
{
return GetHiddenTokensToLeft(tokenIndex, -1);
}
protected internal virtual IList<IToken> FilterForChannel(int from, int to, int channel)
{
IList<IToken> hidden = new List<IToken>();
for (int i = from; i <= to; i++)
{
IToken t = tokens[i];
if (channel == -1)
{
if (t.Channel != Lexer.DefaultTokenChannel)
{
hidden.Add(t);
}
}
else
{
if (t.Channel == channel)
{
hidden.Add(t);
}
}
}
if (hidden.Count == 0)
{
return null;
}
return hidden;
}
public virtual string SourceName
{
get
{
return _tokenSource.SourceName;
}
}
/// <summary>Get the text of all tokens in this buffer.</summary>
/// <remarks>Get the text of all tokens in this buffer.</remarks>
[return: NotNull]
public virtual string GetText()
{
Fill();
return GetText(Interval.Of(0, Size - 1));
}
[return: NotNull]
public virtual string GetText(Interval interval)
{
int start = interval.a;
int stop = interval.b;
if (start < 0 || stop < 0)
{
return string.Empty;
}
LazyInit();
if (stop >= tokens.Count)
{
stop = tokens.Count - 1;
}
StringBuilder buf = new StringBuilder();
for (int i = start; i <= stop; i++)
{
IToken t = tokens[i];
if (t.Type == TokenConstants.EOF)
{
break;
}
buf.Append(t.Text);
}
return buf.ToString();
}
[return: NotNull]
public virtual string GetText(RuleContext ctx)
{
return GetText(ctx.SourceInterval);
}
[return: NotNull]
public virtual string GetText(IToken start, IToken stop)
{
if (start != null && stop != null)
{
return GetText(Interval.Of(start.TokenIndex, stop.TokenIndex));
}
return string.Empty;
}
/// <summary>Get all tokens from lexer until EOF.</summary>
/// <remarks>Get all tokens from lexer until EOF.</remarks>
public virtual void Fill()
{
LazyInit();
int blockSize = 1000;
while (true)
{
int fetched = Fetch(blockSize);
if (fetched < blockSize)
{
return;
}
}
}
}
}
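
The stream is normally driven lazily by the parser, but it can also be filled eagerly and inspected, which makes the buffering behavior easy to see. A sketch, with MyGrammarLexer standing in for a hypothetical generated lexer:

    using Antlr4.Runtime;

    internal static class BufferedTokenStreamExample
    {
        internal static void DumpTokens(string text)
        {
            var stream = new BufferedTokenStream(new MyGrammarLexer(CharStreams.fromstring(text)));
            stream.Fill(); // pull every token, including EOF, into the in-memory buffer
            foreach (IToken token in stream.GetTokens())
            {
                System.Console.WriteLine(token.Type + " '" + token.Text + "'");
            }
            // After Fill(), random access is cheap: Get(i), LT(k), and GetText(interval)
            // all read from the same buffered list.
        }
    }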


@ -0,0 +1,96 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.IO;
using System.Text;
using Antlr4.Runtime;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <summary>Utility class to create <see cref="ICharStream"/>s from various sources of
/// string data.
///
/// The methods in this utility class support the full range of
/// Unicode code points up to U+10FFFF, unlike <see cref="AntlrInputStream"/>,
/// which is limited to 16-bit Unicode code units up to U+FFFF.
/// </summary>
internal static class CharStreams
{
/// <summary>Creates an <see cref="ICharStream"/> given a path to a UTF-8
/// encoded file on disk.
///
/// Reads the entire contents of the file into the result before returning.
/// </summary>
public static ICharStream fromPath(string path)
{
return fromPath(path, Encoding.UTF8);
}
/// <summary>Creates an <see cref="ICharStream"/> given a path to a
/// file on disk and the encoding of the bytes contained in the file.
///
/// Reads the entire contents of the file into the result before returning.
/// </summary>
public static ICharStream fromPath(string path, Encoding encoding)
{
var pathContents = File.ReadAllText(path, encoding);
var result = new CodePointCharStream(pathContents);
result.name = path;
return result;
}
/// <summary>Creates an <see cref="ICharStream"/> given an opened
/// <see cref="TextReader"/>.
///
/// Reads the entire contents of the TextReader then closes the reader before returning.
/// </summary>
public static ICharStream fromTextReader(TextReader textReader)
{
try {
var textReaderContents = textReader.ReadToEnd();
return new CodePointCharStream(textReaderContents);
} finally {
textReader.Dispose();
}
}
/// <summary>Creates an <see cref="ICharStream"/> given an opened
/// <see cref="Stream"/> from which UTF-8 encoded bytes can be read.
///
/// Reads the entire contents of the stream into the result then
/// closes the stream before returning.
/// </summary>
public static ICharStream fromStream(Stream stream)
{
return fromStream(stream, Encoding.UTF8);
}
/// <summary>Creates an <see cref="ICharStream"/> given an opened
/// <see cref="Stream"/> as well as the encoding of the bytes
/// to be read from the stream.
///
/// Reads the entire contents of the stream into the result then
/// closes the stream before returning.
/// </summary>
public static ICharStream fromStream(Stream stream, Encoding encoding)
{
using (StreamReader sr = new StreamReader(stream, encoding, false)) {
return fromTextReader(sr);
}
}
/// <summary>Creates an <see cref="ICharStream"/> given a <see cref="string"/>.
/// </summary>
public static ICharStream fromstring(string s)
{
return new CodePointCharStream(s);
}
}
}
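
All of these helpers end up in a CodePointCharStream, so choosing one is mostly a question of where the text lives. A short sketch of the two most common entry points (the query string is arbitrary):

    using Antlr4.Runtime;

    internal static class CharStreamsExample
    {
        internal static ICharStream FromLiteral()
        {
            // In-memory text; supports code points above U+FFFF, unlike AntlrInputStream.
            return CharStreams.fromstring("SELECT * FROM c");
        }

        internal static ICharStream FromUtf8File(string path)
        {
            // Reads the whole file up front; use the Encoding overload for non-UTF-8 input.
            return CharStreams.fromPath(path);
        }
    }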


@ -0,0 +1,356 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
[System.Serializable]
internal class CommonToken : IWritableToken
{
private const long serialVersionUID = -6708843461296520577L;
/// <summary>
/// An empty
/// <see cref="Tuple{T1, T2}"/>
/// which is used as the default value of
/// <see cref="source"/>
/// for tokens that do not have a source.
/// </summary>
protected internal static readonly Tuple<ITokenSource, ICharStream> EmptySource = Tuple.Create<ITokenSource, ICharStream>(null, null);
/// <summary>
/// This is the backing field for the <see cref="Type"/> property.
/// </summary>
private int _type;
/// <summary>
/// This is the backing field for the <see cref="Line"/> property.
/// </summary>
private int _line;
/// <summary>
/// This is the backing field for the <see cref="Column"/> property.
/// </summary>
protected internal int charPositionInLine = -1;
/// <summary>
/// This is the backing field for the <see cref="Channel"/> property.
/// </summary>
private int _channel = TokenConstants.DefaultChannel;
/// <summary>
/// This is the backing field for
/// <see cref="TokenSource()"/>
/// and
/// <see cref="InputStream()"/>
/// .
/// <p>
/// These properties share a field to reduce the memory footprint of
/// <see cref="CommonToken"/>
/// . Tokens created by a
/// <see cref="CommonTokenFactory"/>
/// from
/// the same source and input stream share a reference to the same
/// <see cref="Tuple{T1, T2}"/>
/// containing these values.</p>
/// </summary>
[NotNull]
protected internal Tuple<ITokenSource, ICharStream> source;
/// <summary>
/// This is the backing field for the <see cref="Text"/> property.
/// </summary>
/// <seealso cref="Text"/>
private string _text;
/// <summary>
/// This is the backing field for the <see cref="TokenIndex"/> property.
/// </summary>
protected internal int index = -1;
/// <summary>
/// This is the backing field for the <see cref="StartIndex"/> property.
/// </summary>
protected internal int start;
/// <summary>
/// This is the backing field for the <see cref="StopIndex"/> property.
/// </summary>
protected internal int stop;
/// <summary>
/// Constructs a new
/// <see cref="CommonToken"/>
/// with the specified token type.
/// </summary>
/// <param name="type">The token type.</param>
public CommonToken(int type)
{
// set to invalid position
this._type = type;
this.source = EmptySource;
}
public CommonToken(Tuple<ITokenSource, ICharStream> source, int type, int channel, int start, int stop)
{
this.source = source;
this._type = type;
this._channel = channel;
this.start = start;
this.stop = stop;
if (source.Item1 != null)
{
this._line = source.Item1.Line;
this.charPositionInLine = source.Item1.Column;
}
}
/// <summary>
/// Constructs a new
/// <see cref="CommonToken"/>
/// with the specified token type and
/// text.
/// </summary>
/// <param name="type">The token type.</param>
/// <param name="text">The text of the token.</param>
public CommonToken(int type, string text)
{
this._type = type;
this._channel = TokenConstants.DefaultChannel;
this._text = text;
this.source = EmptySource;
}
/// <summary>
/// Constructs a new
/// <see cref="CommonToken"/>
/// as a copy of another
/// <see cref="IToken"/>
/// .
/// <p>
/// If
/// <paramref name="oldToken"/>
/// is also a
/// <see cref="CommonToken"/>
/// instance, the newly
/// constructed token will share a reference to the
/// <see cref="Text()"/>
/// field and
/// the
/// <see cref="Tuple{T1, T2}"/>
/// stored in
/// <see cref="source"/>
/// . Otherwise,
/// <see cref="Text()"/>
/// will
/// be assigned the result of calling
/// <see cref="Text()"/>
/// , and
/// <see cref="source"/>
/// will be constructed from the result of
/// <see cref="IToken.TokenSource()"/>
/// and
/// <see cref="IToken.InputStream()"/>
/// .</p>
/// </summary>
/// <param name="oldToken">The token to copy.</param>
public CommonToken(IToken oldToken)
{
_type = oldToken.Type;
_line = oldToken.Line;
index = oldToken.TokenIndex;
charPositionInLine = oldToken.Column;
_channel = oldToken.Channel;
start = oldToken.StartIndex;
stop = oldToken.StopIndex;
if (oldToken is Antlr4.Runtime.CommonToken)
{
_text = ((Antlr4.Runtime.CommonToken)oldToken)._text;
source = ((Antlr4.Runtime.CommonToken)oldToken).source;
}
else
{
_text = oldToken.Text;
source = Tuple.Create(oldToken.TokenSource, oldToken.InputStream);
}
}
public virtual int Type
{
get
{
return _type;
}
set
{
this._type = value;
}
}
public virtual int Line
{
get
{
return _line;
}
set
{
this._line = value;
}
}
/// <summary>Explicitly set the text for this token.</summary>
/// <remarks>
/// Explicitly set the text for this token. If <c>text</c> is not
/// <see langword="null"/>
/// , then
/// <see cref="Text()"/>
/// will return this value rather than
/// extracting the text from the input.
/// </remarks>
/// <value>
/// The explicit text of the token, or
/// <see langword="null"/>
/// if the text
/// should be obtained from the input along with the start and stop indexes
/// of the token.
/// </value>
public virtual string Text
{
get
{
if (_text != null)
{
return _text;
}
ICharStream input = InputStream;
if (input == null)
{
return null;
}
int n = input.Size;
if (start < n && stop < n)
{
return input.GetText(Interval.Of(start, stop));
}
else
{
return "<EOF>";
}
}
set
{
this._text = value;
}
}
public virtual int Column
{
get
{
return charPositionInLine;
}
set
{
int charPositionInLine = value;
this.charPositionInLine = charPositionInLine;
}
}
public virtual int Channel
{
get
{
return _channel;
}
set
{
this._channel = value;
}
}
public virtual int StartIndex
{
get
{
return start;
}
set
{
int start = value;
this.start = start;
}
}
public virtual int StopIndex
{
get
{
return stop;
}
set
{
int stop = value;
this.stop = stop;
}
}
public virtual int TokenIndex
{
get
{
return index;
}
set
{
int index = value;
this.index = index;
}
}
public virtual ITokenSource TokenSource
{
get
{
return source.Item1;
}
}
public virtual ICharStream InputStream
{
get
{
return source.Item2;
}
}
public override string ToString()
{
string channelStr = string.Empty;
if (_channel > 0)
{
channelStr = ",channel=" + _channel;
}
string txt = Text;
if (txt != null)
{
txt = txt.Replace("\n", "\\n");
txt = txt.Replace("\r", "\\r");
txt = txt.Replace("\t", "\\t");
}
else
{
txt = "<no text>";
}
return "[@" + TokenIndex + "," + start + ":" + stop + "='" + txt + "',<" + _type + ">" + channelStr + "," + _line + ":" + Column + "]";
}
}
}
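
The copy constructor is the usual way to materialize a modified token: it preserves the type, position, channel, and source tuple while letting the caller pin the otherwise lazily computed Text. A sketch, assuming original is an existing IToken obtained elsewhere:

    using Antlr4.Runtime;

    internal static class CommonTokenExample
    {
        // original is assumed to be an IToken produced by a lexer or token stream.
        internal static CommonToken Rename(IToken original)
        {
            // The copy constructor carries over type/line/column/channel/start/stop;
            // setting Text pins the value so it no longer depends on the char stream.
            return new CommonToken(original) { Text = "renamedIdentifier" };
        }
    }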


@ -0,0 +1,139 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <summary>
/// This default implementation of
/// <see cref="ITokenFactory"/>
/// creates
/// <see cref="CommonToken"/>
/// objects.
/// </summary>
internal class CommonTokenFactory : ITokenFactory
{
/// <summary>
/// The default
/// <see cref="CommonTokenFactory"/>
/// instance.
/// <p>
/// This token factory does not explicitly copy token text when constructing
/// tokens.</p>
/// </summary>
public static readonly ITokenFactory Default = new Antlr4.Runtime.CommonTokenFactory();
/// <summary>
/// Indicates whether
/// <see cref="CommonToken.Text"/>
/// should be called after
/// constructing tokens to explicitly set the text. This is useful for cases
/// where the input stream might not be able to provide arbitrary substrings
/// of text from the input after the lexer creates a token (e.g. the
/// implementation of
/// <see cref="ICharStream.GetText(Antlr4.Runtime.Misc.Interval)"/>
/// in
/// <see cref="UnbufferedCharStream"/>
/// throws an
/// <see cref="System.NotSupportedException"/>
/// ). Explicitly setting the token text
/// allows
/// <see cref="IToken.Text()"/>
/// to be called at any time regardless of the
/// input stream implementation.
/// <p>
/// The default value is
/// <see langword="false"/>
/// to avoid the performance and memory
/// overhead of copying text for every token unless explicitly requested.</p>
/// </summary>
protected internal readonly bool copyText;
/// <summary>
/// Constructs a
/// <see cref="CommonTokenFactory"/>
/// with the specified value for
/// <see cref="copyText"/>
/// .
/// <p>
/// When
/// <paramref name="copyText"/>
/// is
/// <see langword="false"/>
/// , the
/// <see cref="Default"/>
/// instance
/// should be used instead of constructing a new instance.</p>
/// </summary>
/// <param name="copyText">
/// The value for
/// <see cref="copyText"/>
/// .
/// </param>
public CommonTokenFactory(bool copyText)
{
this.copyText = copyText;
}
/// <summary>
/// Constructs a
/// <see cref="CommonTokenFactory"/>
/// with
/// <see cref="copyText"/>
/// set to
/// <see langword="false"/>
/// .
/// <p>
/// The
/// <see cref="Default"/>
/// instance should be used instead of calling this
/// directly.</p>
/// </summary>
public CommonTokenFactory()
: this(false)
{
}
public virtual CommonToken Create(Tuple<ITokenSource, ICharStream> source, int type, string text, int channel, int start, int stop, int line, int charPositionInLine)
{
CommonToken t = new CommonToken(source, type, channel, start, stop);
t.Line = line;
t.Column = charPositionInLine;
if (text != null)
{
t.Text = text;
}
else
{
if (copyText && source.Item2 != null)
{
t.Text = source.Item2.GetText(Interval.Of(start, stop));
}
}
return t;
}
IToken ITokenFactory.Create(Tuple<ITokenSource, ICharStream> source, int type, string text, int channel, int start, int stop, int line, int charPositionInLine)
{
return Create(source, type, text, channel, start, stop, line, charPositionInLine);
}
public virtual CommonToken Create(int type, string text)
{
return new CommonToken(type, text);
}
IToken ITokenFactory.Create(int type, string text)
{
return Create(type, text);
}
}
}
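
copyText only matters when the character stream cannot serve substrings after the fact (the UnbufferedCharStream case mentioned above); otherwise the shared Default instance suffices. A sketch; the token type constant and the lexer wiring in the comment are assumptions, not part of this change:

    using Antlr4.Runtime;

    internal static class TokenFactoryExample
    {
        internal static IToken CreateSynthetic()
        {
            // Plain creation by (type, text); 1 is an arbitrary token type.
            return CommonTokenFactory.Default.Create(1, "synthetic");
        }

        internal static ITokenFactory CreateCopyingFactory()
        {
            // Captures Text eagerly at creation time, which is only needed when the char
            // stream cannot serve substrings later (e.g. UnbufferedCharStream).
            // Typically installed on the lexer, e.g. lexer.TokenFactory = factory
            // (assuming the usual writable ITokenSource.TokenFactory property).
            return new CommonTokenFactory(true);
        }
    }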


@ -0,0 +1,179 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <summary>
/// This class extends
/// <see cref="BufferedTokenStream"/>
/// with functionality to filter
/// token streams to tokens on a particular channel (tokens where
/// <see cref="IToken.Channel()"/>
/// returns a particular value).
/// <p>
/// This token stream provides access to all tokens by index or when calling
/// methods like
/// <see cref="BufferedTokenStream.GetText()"/>
/// . The channel filtering is only used for code
/// accessing tokens via the lookahead methods
/// <see cref="BufferedTokenStream.LA(int)"/>
/// ,
/// <see cref="LT(int)"/>
/// , and
/// <see cref="Lb(int)"/>
/// .</p>
/// <p>
/// By default, tokens are placed on the default channel
/// (
/// <see cref="TokenConstants.DefaultChannel"/>
/// ), but may be reassigned by using the
/// <c>-&gt;channel(HIDDEN)</c>
/// lexer command, or by using an embedded action to
/// call
/// <see cref="Lexer.Channel"/>
/// .
/// </p>
/// <p>
/// Note: lexer rules which use the
/// <c>-&gt;skip</c>
/// lexer command or call
/// <see cref="Lexer.Skip()"/>
/// do not produce tokens at all, so input text matched by
/// such a rule will not be available as part of the token stream, regardless of
/// channel.</p>
/// </summary>
internal class CommonTokenStream : BufferedTokenStream
{
/// <summary>Specifies the channel to use for filtering tokens.</summary>
/// <remarks>
/// Specifies the channel to use for filtering tokens.
/// <p>
/// The default value is
/// <see cref="TokenConstants.DefaultChannel"/>
/// , which matches the
/// default channel assigned to tokens created by the lexer.</p>
/// </remarks>
protected internal int channel = TokenConstants.DefaultChannel;
/// <summary>
/// Constructs a new
/// <see cref="CommonTokenStream"/>
/// using the specified token
/// source and the default token channel (
/// <see cref="TokenConstants.DefaultChannel"/>
/// ).
/// </summary>
/// <param name="tokenSource">The token source.</param>
public CommonTokenStream(ITokenSource tokenSource)
: base(tokenSource)
{
}
/// <summary>
/// Constructs a new
/// <see cref="CommonTokenStream"/>
/// using the specified token
/// source and filtering tokens to the specified channel. Only tokens whose
/// <see cref="IToken.Channel()"/>
/// matches
/// <paramref name="channel"/>
/// or have the
/// <see cref="IToken.Type()"/>
/// equal to
/// <see cref="TokenConstants.EOF"/>
/// will be returned by the
/// token stream lookahead methods.
/// </summary>
/// <param name="tokenSource">The token source.</param>
/// <param name="channel">The channel to use for filtering tokens.</param>
public CommonTokenStream(ITokenSource tokenSource, int channel)
: this(tokenSource)
{
this.channel = channel;
}
protected internal override int AdjustSeekIndex(int i)
{
return NextTokenOnChannel(i, channel);
}
protected internal override IToken Lb(int k)
{
if (k == 0 || (p - k) < 0)
{
return null;
}
int i = p;
int n = 1;
// find k good tokens looking backwards
while (n <= k)
{
// skip off-channel tokens
i = PreviousTokenOnChannel(i - 1, channel);
n++;
}
if (i < 0)
{
return null;
}
return tokens[i];
}
public override IToken LT(int k)
{
//System.out.println("enter LT("+k+")");
LazyInit();
if (k == 0)
{
return null;
}
if (k < 0)
{
return Lb(-k);
}
int i = p;
int n = 1;
// we know tokens[p] is a good one
// find k good tokens
while (n < k)
{
// skip off-channel tokens, but make sure to not look past EOF
if (Sync(i + 1))
{
i = NextTokenOnChannel(i + 1, channel);
}
n++;
}
// if ( i>range ) range = i;
return tokens[i];
}
/// <summary>Count EOF just once.</summary>
/// <remarks>Count EOF just once.</remarks>
public virtual int GetNumberOfOnChannelTokens()
{
int n = 0;
Fill();
for (int i = 0; i < tokens.Count; i++)
{
IToken t = tokens[i];
if (t.Channel == channel)
{
n++;
}
if (t.Type == TokenConstants.EOF)
{
break;
}
}
return n;
}
}
}
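
The channel argument is what separates this class from its base: lookahead only ever sees tokens on the chosen channel, while the underlying buffer still holds everything. A sketch with MyGrammarLexer as a hypothetical generated lexer:

    using Antlr4.Runtime;

    internal static class CommonTokenStreamExample
    {
        internal static void Demo(string text)
        {
            // Default-channel view: what a parser would actually consume.
            var onChannel = new CommonTokenStream(new MyGrammarLexer(CharStreams.fromstring(text)));
            onChannel.Fill();
            System.Console.WriteLine(onChannel.GetNumberOfOnChannelTokens() + " parsable tokens");

            // Hidden-channel view over the same text: tokens routed with ->channel(HIDDEN).
            var hidden = new CommonTokenStream(new MyGrammarLexer(CharStreams.fromstring(text)), TokenConstants.HiddenChannel);
            hidden.Fill();
            // Size counts every buffered token regardless of channel; only lookahead is filtered.
            System.Console.WriteLine(hidden.Size + " tokens buffered in total");
        }
    }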


@ -0,0 +1,52 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#if !PORTABLE
using Antlr4.Runtime;
using Antlr4.Runtime.Sharpen;
using System.IO;
namespace Antlr4.Runtime
{
/// <author>Sam Harwell</author>
internal class ConsoleErrorListener<Symbol> : IAntlrErrorListener<Symbol>
{
/// <summary>
/// Provides a default instance of
/// <see cref="ConsoleErrorListener{Symbol}"/>
/// .
/// </summary>
public static readonly ConsoleErrorListener<Symbol> Instance = new ConsoleErrorListener<Symbol>();
/// <summary>
/// <inheritDoc/>
/// <p>
/// This implementation prints messages to
/// <see cref="System.Console.Error"/>
/// containing the
/// values of
/// <paramref name="line"/>
/// ,
/// <paramref name="charPositionInLine"/>
/// , and
/// <paramref name="msg"/>
/// using
/// the following format.</p>
/// <pre>
/// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
/// </pre>
/// </summary>
public virtual void SyntaxError(TextWriter output, IRecognizer recognizer, Symbol offendingSymbol, int line, int charPositionInLine, string msg, RecognitionException e)
{
output.WriteLine("line " + line + ":" + charPositionInLine + " " + msg);
}
}
}
#endif


@ -0,0 +1,804 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <summary>
/// This is the default implementation of
/// <see cref="IAntlrErrorStrategy"/>
/// used for
/// error reporting and recovery in ANTLR parsers.
/// </summary>
internal class DefaultErrorStrategy : IAntlrErrorStrategy
{
/// <summary>
/// Indicates whether the error strategy is currently "recovering from an
/// error".
/// </summary>
/// <remarks>
/// Indicates whether the error strategy is currently "recovering from an
/// error". This is used to suppress reporting multiple error messages while
/// attempting to recover from a detected syntax error.
/// </remarks>
/// <seealso cref="InErrorRecoveryMode(Parser)"/>
protected internal bool errorRecoveryMode = false;
/// <summary>The index into the input stream where the last error occurred.</summary>
/// <remarks>
/// The index into the input stream where the last error occurred.
/// This is used to prevent infinite loops where an error is found
/// but no token is consumed during recovery...another error is found,
/// ad nauseam. This is a failsafe mechanism to guarantee that at least
/// one token/tree node is consumed for two errors.
/// </remarks>
protected internal int lastErrorIndex = -1;
protected internal IntervalSet lastErrorStates;
/// <summary>
/// <inheritDoc/>
/// <p>The default implementation simply calls
/// <see cref="EndErrorCondition(Parser)"/>
/// to
/// ensure that the handler is not in error recovery mode.</p>
/// </summary>
public virtual void Reset(Parser recognizer)
{
EndErrorCondition(recognizer);
}
/// <summary>
/// This method is called to enter error recovery mode when a recognition
/// exception is reported.
/// </summary>
/// <remarks>
/// This method is called to enter error recovery mode when a recognition
/// exception is reported.
/// </remarks>
/// <param name="recognizer">the parser instance</param>
protected internal virtual void BeginErrorCondition(Parser recognizer)
{
errorRecoveryMode = true;
}
/// <summary><inheritDoc/></summary>
public virtual bool InErrorRecoveryMode(Parser recognizer)
{
return errorRecoveryMode;
}
/// <summary>
/// This method is called to leave error recovery mode after recovering from
/// a recognition exception.
/// </summary>
/// <remarks>
/// This method is called to leave error recovery mode after recovering from
/// a recognition exception.
/// </remarks>
/// <param name="recognizer"/>
protected internal virtual void EndErrorCondition(Parser recognizer)
{
errorRecoveryMode = false;
lastErrorStates = null;
lastErrorIndex = -1;
}
/// <summary>
/// <inheritDoc/>
/// <p>The default implementation simply calls
/// <see cref="EndErrorCondition(Parser)"/>
/// .</p>
/// </summary>
public virtual void ReportMatch(Parser recognizer)
{
EndErrorCondition(recognizer);
}
/// <summary>
/// <inheritDoc/>
/// <p>The default implementation returns immediately if the handler is already
/// in error recovery mode. Otherwise, it calls
/// <see cref="BeginErrorCondition(Parser)"/>
/// and dispatches the reporting task based on the runtime type of
/// <paramref name="e"/>
/// according to the following table.</p>
/// <ul>
/// <li>
/// <see cref="NoViableAltException"/>
/// : Dispatches the call to
/// <see cref="ReportNoViableAlternative(Parser, NoViableAltException)"/>
/// </li>
/// <li>
/// <see cref="InputMismatchException"/>
/// : Dispatches the call to
/// <see cref="ReportInputMismatch(Parser, InputMismatchException)"/>
/// </li>
/// <li>
/// <see cref="FailedPredicateException"/>
/// : Dispatches the call to
/// <see cref="ReportFailedPredicate(Parser, FailedPredicateException)"/>
/// </li>
/// <li>All other types: calls
/// <see cref="Parser.NotifyErrorListeners(string)"/>
/// to report
/// the exception</li>
/// </ul>
/// </summary>
public virtual void ReportError(Parser recognizer, RecognitionException e)
{
// if we've already reported an error and have not matched a token
// yet successfully, don't report any errors.
if (InErrorRecoveryMode(recognizer))
{
// System.err.print("[SPURIOUS] ");
return;
}
// don't report spurious errors
BeginErrorCondition(recognizer);
if (e is NoViableAltException)
{
ReportNoViableAlternative(recognizer, (NoViableAltException)e);
}
else
{
if (e is InputMismatchException)
{
ReportInputMismatch(recognizer, (InputMismatchException)e);
}
else
{
if (e is FailedPredicateException)
{
ReportFailedPredicate(recognizer, (FailedPredicateException)e);
}
else
{
#if !PORTABLE
System.Console.Error.WriteLine("unknown recognition error type: " + e.GetType().FullName);
#endif
NotifyErrorListeners(recognizer, e.Message, e);
}
}
}
}
protected internal virtual void NotifyErrorListeners(Parser recognizer, string message, RecognitionException e)
{
recognizer.NotifyErrorListeners(e.OffendingToken, message, e);
}
/// <summary>
/// <inheritDoc/>
/// <p>The default implementation resynchronizes the parser by consuming tokens
/// until we find one in the resynchronization set--loosely the set of tokens
/// that can follow the current rule.</p>
/// </summary>
public virtual void Recover(Parser recognizer, RecognitionException e)
{
// System.out.println("recover in "+recognizer.getRuleInvocationStack()+
// " index="+recognizer.getInputStream().index()+
// ", lastErrorIndex="+
// lastErrorIndex+
// ", states="+lastErrorStates);
if (lastErrorIndex == ((ITokenStream)recognizer.InputStream).Index && lastErrorStates != null && lastErrorStates.Contains(recognizer.State))
{
// uh oh, another error at same token index and previously-visited
// state in ATN; must be a case where LT(1) is in the recovery
// token set so nothing got consumed. Consume a single token
// at least to prevent an infinite loop; this is a failsafe.
// System.err.println("seen error condition before index="+
// lastErrorIndex+", states="+lastErrorStates);
// System.err.println("FAILSAFE consumes "+recognizer.getTokenNames()[recognizer.getInputStream().LA(1)]);
recognizer.Consume();
}
lastErrorIndex = ((ITokenStream)recognizer.InputStream).Index;
if (lastErrorStates == null)
{
lastErrorStates = new IntervalSet();
}
lastErrorStates.Add(recognizer.State);
IntervalSet followSet = GetErrorRecoverySet(recognizer);
ConsumeUntil(recognizer, followSet);
}
/// <summary>
/// The default implementation of
/// <see cref="IAntlrErrorStrategy.Sync(Parser)"/>
/// makes sure
/// that the current lookahead symbol is consistent with what we were expecting
/// at this point in the ATN. You can call this anytime but ANTLR only
/// generates code to check before subrules/loops and each iteration.
/// <p>Implements Jim Idle's magic sync mechanism in closures and optional
/// subrules. E.g.,</p>
/// <pre>
/// a : sync ( stuff sync )* ;
/// sync : {consume to what can follow sync} ;
/// </pre>
/// At the start of a sub rule upon error,
/// <see cref="Sync(Parser)"/>
/// performs single
/// token deletion, if possible. If it can't do that, it bails on the current
/// rule and uses the default error recovery, which consumes until the
/// resynchronization set of the current rule.
/// <p>If the sub rule is optional (
/// <c>(...)?</c>
/// ,
/// <c>(...)*</c>
/// , or block
/// with an empty alternative), then the expected set includes what follows
/// the subrule.</p>
/// <p>During loop iteration, it consumes until it sees a token that can start a
/// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
/// stay in the loop as long as possible.</p>
/// <p><strong>ORIGINS</strong></p>
/// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
/// A single mismatched or missing token would force the parser to bail
/// out of all the rules surrounding the loop. So, for rule
/// <pre>
/// classDef : 'class' ID '{' member* '}'
/// </pre>
/// input with an extra token between members would force the parser to
/// consume until it found the next class definition rather than the next
/// member definition of the current class.
/// <p>This functionality costs a little bit of effort because the parser has to
/// compare the token set at the start of the loop and at each iteration. If for
/// some reason speed is suffering for you, you can turn off this
/// functionality by simply overriding this method as a blank { }.</p>
/// </summary>
/// <exception cref="Antlr4.Runtime.RecognitionException"/>
public virtual void Sync(Parser recognizer)
{
ATNState s = recognizer.Interpreter.atn.states[recognizer.State];
// System.err.println("sync @ "+s.stateNumber+"="+s.getClass().getSimpleName());
// If already recovering, don't try to sync
if (InErrorRecoveryMode(recognizer))
{
return;
}
ITokenStream tokens = ((ITokenStream)recognizer.InputStream);
int la = tokens.LA(1);
// try cheaper subset first; might get lucky. seems to shave a wee bit off
var nextTokens = recognizer.Atn.NextTokens(s);
if (nextTokens.Contains(TokenConstants.EPSILON) || nextTokens.Contains(la))
{
return;
}
switch (s.StateType)
{
case StateType.BlockStart:
case StateType.StarBlockStart:
case StateType.PlusBlockStart:
case StateType.StarLoopEntry:
{
// report error and recover if possible
if (SingleTokenDeletion(recognizer) != null)
{
return;
}
throw new InputMismatchException(recognizer);
}
case StateType.PlusLoopBack:
case StateType.StarLoopBack:
{
// System.err.println("at loop back: "+s.getClass().getSimpleName());
ReportUnwantedToken(recognizer);
IntervalSet expecting = recognizer.GetExpectedTokens();
IntervalSet whatFollowsLoopIterationOrRule = expecting.Or(GetErrorRecoverySet(recognizer));
ConsumeUntil(recognizer, whatFollowsLoopIterationOrRule);
break;
}
default:
{
// do nothing if we can't identify the exact kind of ATN state
break;
}
}
}
/// <summary>
/// This is called by
/// <see cref="ReportError(Parser, RecognitionException)"/>
/// when the exception is a
/// <see cref="NoViableAltException"/>
/// .
/// </summary>
/// <seealso cref="ReportError(Parser, RecognitionException)"/>
/// <param name="recognizer">the parser instance</param>
/// <param name="e">the recognition exception</param>
protected internal virtual void ReportNoViableAlternative(Parser recognizer, NoViableAltException e)
{
ITokenStream tokens = ((ITokenStream)recognizer.InputStream);
string input;
if (tokens != null)
{
if (e.StartToken.Type == TokenConstants.EOF)
{
input = "<EOF>";
}
else
{
input = tokens.GetText(e.StartToken, e.OffendingToken);
}
}
else
{
input = "<unknown input>";
}
string msg = "no viable alternative at input " + EscapeWSAndQuote(input);
NotifyErrorListeners(recognizer, msg, e);
}
/// <summary>
/// This is called by
/// <see cref="ReportError(Parser, RecognitionException)"/>
/// when the exception is an
/// <see cref="InputMismatchException"/>
/// .
/// </summary>
/// <seealso cref="ReportError(Parser, RecognitionException)"/>
/// <param name="recognizer">the parser instance</param>
/// <param name="e">the recognition exception</param>
protected internal virtual void ReportInputMismatch(Parser recognizer, InputMismatchException e)
{
string msg = "mismatched input " + GetTokenErrorDisplay(e.OffendingToken) + " expecting " + e.GetExpectedTokens().ToString(recognizer.Vocabulary);
NotifyErrorListeners(recognizer, msg, e);
}
/// <summary>
/// This is called by
/// <see cref="ReportError(Parser, RecognitionException)"/>
/// when the exception is a
/// <see cref="FailedPredicateException"/>
/// .
/// </summary>
/// <seealso cref="ReportError(Parser, RecognitionException)"/>
/// <param name="recognizer">the parser instance</param>
/// <param name="e">the recognition exception</param>
protected internal virtual void ReportFailedPredicate(Parser recognizer, FailedPredicateException e)
{
string ruleName = recognizer.RuleNames[recognizer.RuleContext.RuleIndex];
string msg = "rule " + ruleName + " " + e.Message;
NotifyErrorListeners(recognizer, msg, e);
}
/// <summary>
/// This method is called to report a syntax error which requires the removal
/// of a token from the input stream.
/// </summary>
/// <remarks>
/// This method is called to report a syntax error which requires the removal
/// of a token from the input stream. At the time this method is called, the
/// erroneous symbol is the current
/// <c>LT(1)</c>
/// symbol and has not yet been
/// removed from the input stream. When this method returns,
/// <paramref name="recognizer"/>
/// is in error recovery mode.
/// <p>This method is called when
/// <see cref="SingleTokenDeletion(Parser)"/>
/// identifies
/// single-token deletion as a viable recovery strategy for a mismatched
/// input error.</p>
/// <p>The default implementation simply returns if the handler is already in
/// error recovery mode. Otherwise, it calls
/// <see cref="BeginErrorCondition(Parser)"/>
/// to
/// enter error recovery mode, followed by calling
/// <see cref="Parser.NotifyErrorListeners(string)"/>
/// .</p>
/// </remarks>
/// <param name="recognizer">the parser instance</param>
protected internal virtual void ReportUnwantedToken(Parser recognizer)
{
if (InErrorRecoveryMode(recognizer))
{
return;
}
BeginErrorCondition(recognizer);
IToken t = recognizer.CurrentToken;
string tokenName = GetTokenErrorDisplay(t);
IntervalSet expecting = GetExpectedTokens(recognizer);
string msg = "extraneous input " + tokenName + " expecting " + expecting.ToString(recognizer.Vocabulary);
recognizer.NotifyErrorListeners(t, msg, null);
}
/// <summary>
/// This method is called to report a syntax error which requires the
/// insertion of a missing token into the input stream.
/// </summary>
/// <remarks>
/// This method is called to report a syntax error which requires the
/// insertion of a missing token into the input stream. At the time this
/// method is called, the missing token has not yet been inserted. When this
/// method returns,
/// <paramref name="recognizer"/>
/// is in error recovery mode.
/// <p>This method is called when
/// <see cref="SingleTokenInsertion(Parser)"/>
/// identifies
/// single-token insertion as a viable recovery strategy for a mismatched
/// input error.</p>
/// <p>The default implementation simply returns if the handler is already in
/// error recovery mode. Otherwise, it calls
/// <see cref="BeginErrorCondition(Parser)"/>
/// to
/// enter error recovery mode, followed by calling
/// <see cref="Parser.NotifyErrorListeners(string)"/>
/// .</p>
/// </remarks>
/// <param name="recognizer">the parser instance</param>
protected internal virtual void ReportMissingToken(Parser recognizer)
{
if (InErrorRecoveryMode(recognizer))
{
return;
}
BeginErrorCondition(recognizer);
IToken t = recognizer.CurrentToken;
IntervalSet expecting = GetExpectedTokens(recognizer);
string msg = "missing " + expecting.ToString(recognizer.Vocabulary) + " at " + GetTokenErrorDisplay(t);
recognizer.NotifyErrorListeners(t, msg, null);
}
/// <summary>
/// <inheritDoc/>
/// <p>The default implementation attempts to recover from the mismatched input
/// by using single token insertion and deletion as described below. If the
/// recovery attempt fails, this method throws an
/// <see cref="InputMismatchException"/>
/// .</p>
/// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
/// <p>
/// <c>LA(1)</c>
/// is not what we are looking for. If
/// <c>LA(2)</c>
/// has the
/// right token, however, then assume
/// <c>LA(1)</c>
/// is some extra spurious
/// token and delete it. Then consume and return the next token (which was
/// the
/// <c>LA(2)</c>
/// token) as the successful result of the match operation.</p>
/// <p>This recovery strategy is implemented by
/// <see cref="SingleTokenDeletion(Parser)"/>
/// .</p>
/// <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
/// <p>If current token (at
/// <c>LA(1)</c>
/// ) is consistent with what could come
/// after the expected
/// <c>LA(1)</c>
/// token, then assume the token is missing
/// and use the parser's
/// <see cref="ITokenFactory"/>
/// to create it on the fly. The
/// "insertion" is performed by returning the created token as the successful
/// result of the match operation.</p>
/// <p>This recovery strategy is implemented by
/// <see cref="SingleTokenInsertion(Parser)"/>
/// .</p>
/// <p><strong>EXAMPLE</strong></p>
/// <p>For example, input
/// <c>i=(3;</c>
/// is clearly missing the
/// <c>')'</c>
/// . When
/// the parser returns from the nested call to
/// <c>expr</c>
/// , it will have
/// call chain:</p>
/// <pre>
/// stat &#x2192; expr &#x2192; atom
/// </pre>
/// and it will be trying to match the
/// <c>')'</c>
/// at this point in the
/// derivation:
/// <pre>
/// =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
/// ^
/// </pre>
/// The attempt to match
/// <c>')'</c>
/// will fail when it sees
/// <c>';'</c>
/// and
/// call
/// <see cref="RecoverInline(Parser)"/>
/// . To recover, it sees that
/// <c>LA(1)==';'</c>
/// is in the set of tokens that can follow the
/// <c>')'</c>
/// token reference
/// in rule
/// <c>atom</c>
/// . It can assume that you forgot the
/// <c>')'</c>
/// .
/// </summary>
/// <exception cref="Antlr4.Runtime.RecognitionException"/>
public virtual IToken RecoverInline(Parser recognizer)
{
// SINGLE TOKEN DELETION
IToken matchedSymbol = SingleTokenDeletion(recognizer);
if (matchedSymbol != null)
{
// we have deleted the extra token.
// now, move past ttype token as if all were ok
recognizer.Consume();
return matchedSymbol;
}
// SINGLE TOKEN INSERTION
if (SingleTokenInsertion(recognizer))
{
return GetMissingSymbol(recognizer);
}
// even that didn't work; must throw the exception
throw new InputMismatchException(recognizer);
}
/// <summary>
/// This method implements the single-token insertion inline error recovery
/// strategy.
/// </summary>
/// <remarks>
/// This method implements the single-token insertion inline error recovery
/// strategy. It is called by
/// <see cref="RecoverInline(Parser)"/>
/// if the single-token
/// deletion strategy fails to recover from the mismatched input. If this
/// method returns
/// <see langword="true"/>
/// ,
/// <paramref name="recognizer"/>
/// will be in error recovery
/// mode.
/// <p>This method determines whether or not single-token insertion is viable by
/// checking if the
/// <c>LA(1)</c>
/// input symbol could be successfully matched
/// if it were instead the
/// <c>LA(2)</c>
/// symbol. If this method returns
/// <see langword="true"/>
/// , the caller is responsible for creating and inserting a
/// token with the correct type to produce this behavior.</p>
/// </remarks>
/// <param name="recognizer">the parser instance</param>
/// <returns>
///
/// <see langword="true"/>
/// if single-token insertion is a viable recovery
/// strategy for the current mismatched input, otherwise
/// <see langword="false"/>
/// </returns>
protected internal virtual bool SingleTokenInsertion(Parser recognizer)
{
int currentSymbolType = ((ITokenStream)recognizer.InputStream).LA(1);
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token; error recovery
// is free to conjure up and insert the missing token
ATNState currentState = recognizer.Interpreter.atn.states[recognizer.State];
ATNState next = currentState.Transition(0).target;
ATN atn = recognizer.Interpreter.atn;
IntervalSet expectingAtLL2 = atn.NextTokens(next, recognizer.RuleContext);
if (expectingAtLL2.Contains(currentSymbolType))
{
ReportMissingToken(recognizer);
return true;
}
return false;
}
/// <summary>
/// This method implements the single-token deletion inline error recovery
/// strategy.
/// </summary>
/// <remarks>
/// This method implements the single-token deletion inline error recovery
/// strategy. It is called by
/// <see cref="RecoverInline(Parser)"/>
/// to attempt to recover
/// from mismatched input. If this method returns null, the parser and error
/// handler state will not have changed. If this method returns non-null,
/// <paramref name="recognizer"/>
/// will <em>not</em> be in error recovery mode since the
/// returned token was a successful match.
/// <p>If the single-token deletion is successful, this method calls
/// <see cref="ReportUnwantedToken(Parser)"/>
/// to report the error, followed by
/// <see cref="Parser.Consume()"/>
/// to actually "delete" the extraneous token. Then,
/// before returning
/// <see cref="ReportMatch(Parser)"/>
/// is called to signal a successful
/// match.</p>
/// </remarks>
/// <param name="recognizer">the parser instance</param>
/// <returns>
/// the successfully matched
/// <see cref="IToken"/>
/// instance if single-token
/// deletion successfully recovers from the mismatched input, otherwise
/// <see langword="null"/>
/// </returns>
[return: Nullable]
protected internal virtual IToken SingleTokenDeletion(Parser recognizer)
{
int nextTokenType = ((ITokenStream)recognizer.InputStream).LA(2);
IntervalSet expecting = GetExpectedTokens(recognizer);
if (expecting.Contains(nextTokenType))
{
ReportUnwantedToken(recognizer);
recognizer.Consume();
// simply delete extra token
// we want to return the token we're actually matching
IToken matchedSymbol = recognizer.CurrentToken;
ReportMatch(recognizer);
// we know current token is correct
return matchedSymbol;
}
return null;
}
/// <summary>Conjure up a missing token during error recovery.</summary>
/// <remarks>
/// Conjure up a missing token during error recovery.
/// The recognizer attempts to recover from single missing
/// symbols. But, actions might refer to that missing symbol.
/// For example, x=ID {f($x);}. The action clearly assumes
/// that there has been an identifier matched previously and that
/// $x points at that token. If that token is missing, but
/// the next token in the stream is what we want, we assume that
/// this token is missing and we keep going. Because we
/// have to return some token to replace the missing token,
/// we have to conjure one up. This method gives the user control
/// over the tokens returned for missing tokens. Mostly,
/// you will want to create something special for identifier
/// tokens. For literals such as '{' and ',', the default
/// action in the parser or tree parser works. It simply creates
/// a CommonToken of the appropriate type. The text will be the token.
/// If you change what tokens must be created by the lexer,
/// override this method to create the appropriate tokens.
/// </remarks>
[return: NotNull]
protected internal virtual IToken GetMissingSymbol(Parser recognizer)
{
IToken currentSymbol = recognizer.CurrentToken;
IntervalSet expecting = GetExpectedTokens(recognizer);
int expectedTokenType = expecting.MinElement;
// get any element
string tokenText;
if (expectedTokenType == TokenConstants.EOF)
{
tokenText = "<missing EOF>";
}
else
{
tokenText = "<missing " + recognizer.Vocabulary.GetDisplayName(expectedTokenType) + ">";
}
IToken current = currentSymbol;
IToken lookback = ((ITokenStream)recognizer.InputStream).LT(-1);
if (current.Type == TokenConstants.EOF && lookback != null)
{
current = lookback;
}
return ConstructToken(((ITokenStream)recognizer.InputStream).TokenSource, expectedTokenType, tokenText, current);
}
protected internal virtual IToken ConstructToken(ITokenSource tokenSource, int expectedTokenType, string tokenText, IToken current)
{
ITokenFactory factory = tokenSource.TokenFactory;
return factory.Create(Tuple.Create(tokenSource, current.TokenSource.InputStream), expectedTokenType, tokenText, TokenConstants.DefaultChannel, -1, -1, current.Line, current.Column);
}
[return: NotNull]
protected internal virtual IntervalSet GetExpectedTokens(Parser recognizer)
{
return recognizer.GetExpectedTokens();
}
/// <summary>
/// How should a token be displayed in an error message? The default
/// is to display just the text, but during development you might
/// want to have a lot of information spit out.
/// </summary>
/// <remarks>
/// How should a token be displayed in an error message? The default
/// is to display just the text, but during development you might
/// want to have a lot of information spit out. Override in that case
/// to use t.toString() (which, for CommonToken, dumps everything about
/// the token). This is better than forcing you to override a method in
/// your token objects because you don't have to go modify your lexer
/// so that it creates a new Java type.
/// </remarks>
protected internal virtual string GetTokenErrorDisplay(IToken t)
{
if (t == null)
{
return "<no token>";
}
string s = GetSymbolText(t);
if (s == null)
{
if (GetSymbolType(t) == TokenConstants.EOF)
{
s = "<EOF>";
}
else
{
s = "<" + GetSymbolType(t) + ">";
}
}
return EscapeWSAndQuote(s);
}
protected internal virtual string GetSymbolText(IToken symbol)
{
return symbol.Text;
}
protected internal virtual int GetSymbolType(IToken symbol)
{
return symbol.Type;
}
[return: NotNull]
protected internal virtual string EscapeWSAndQuote(string s)
{
// if ( s==null ) return s;
s = s.Replace("\n", "\\n");
s = s.Replace("\r", "\\r");
s = s.Replace("\t", "\\t");
return "'" + s + "'";
}
[return: NotNull]
protected internal virtual IntervalSet GetErrorRecoverySet(Parser recognizer)
{
ATN atn = recognizer.Interpreter.atn;
RuleContext ctx = recognizer.RuleContext;
IntervalSet recoverSet = new IntervalSet();
while (ctx != null && ctx.invokingState >= 0)
{
// compute what follows who invoked us
ATNState invokingState = atn.states[ctx.invokingState];
RuleTransition rt = (RuleTransition)invokingState.Transition(0);
IntervalSet follow = atn.NextTokens(rt.followState);
recoverSet.AddAll(follow);
ctx = ctx.Parent;
}
recoverSet.Remove(TokenConstants.EPSILON);
// System.out.println("recover set "+recoverSet.toString(recognizer.getTokenNames()));
return recoverSet;
}
/// <summary>Consume tokens until one matches the given token set.</summary>
/// <remarks>Consume tokens until one matches the given token set.</remarks>
protected internal virtual void ConsumeUntil(Parser recognizer, IntervalSet set)
{
// System.err.println("consumeUntil("+set.toString(recognizer.getTokenNames())+")");
int ttype = ((ITokenStream)recognizer.InputStream).LA(1);
while (ttype != TokenConstants.EOF && !set.Contains(ttype))
{
//System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
// recognizer.getInputStream().consume();
recognizer.Consume();
ttype = ((ITokenStream)recognizer.InputStream).LA(1);
}
}
}
}
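// A minimal illustrative sketch of the customization points called out in the remarks above:
// Sync() may be overridden with an empty body to disable the adaptive sync mechanism, and
// GetTokenErrorDisplay() may be overridden to dump full token details during development.
// This assumes the enclosing class above is DefaultErrorStrategy, as in the upstream ANTLR
// runtime; the derived class name below is hypothetical.
namespace Antlr4.Runtime
{
    internal sealed class VerboseNoSyncErrorStrategy : DefaultErrorStrategy
    {
        // Disable the sync-on-subrule-entry behavior described in the Sync() remarks;
        // all other recovery (single-token insertion/deletion, consume-until) stays intact.
        public override void Sync(Parser recognizer)
        {
        }

        // Show the whole token (type, channel, indices) instead of just its text.
        protected internal override string GetTokenErrorDisplay(IToken t)
        {
            return t == null ? "<no token>" : t.ToString();
        }
    }
}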

View file

@ -0,0 +1,30 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using Antlr4.Runtime;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <author>Sam Harwell</author>
[Flags]
internal enum Dependents
{
None = 0,
Self = 1 << 0,
Parents = 1 << 1,
Children = 1 << 2,
Ancestors = 1 << 3,
Descendants = 1 << 4,
Siblings = 1 << 5,
PreceedingSiblings = 1 << 6,
FollowingSiblings = 1 << 7,
Preceeding = 1 << 8,
Following = 1 << 9
}
}
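// A minimal sketch of how the [Flags] values above compose; the class and member names
// here are illustrative only and not part of the runtime.
namespace Antlr4.Runtime
{
    internal static class DependentsUsageSketch
    {
        // Combine individual flags with bitwise OR.
        internal static readonly Dependents SelfAndAncestors =
            Dependents.Self | Dependents.Parents | Dependents.Ancestors;

        // Test membership with a bitwise AND mask.
        internal static bool IncludesParents(Dependents dependents)
        {
            return (dependents & Dependents.Parents) != 0;
        }
    }
}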

View file

@ -0,0 +1,100 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections;
using System.Collections.Generic;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Dfa
{
/// <author>Sam Harwell</author>
internal abstract class AbstractEdgeMap<T> : IEdgeMap<T>
where T : class
{
protected internal readonly int minIndex;
protected internal readonly int maxIndex;
protected AbstractEdgeMap(int minIndex, int maxIndex)
{
// the allowed range (with minIndex and maxIndex inclusive) should be less than 2^32
System.Diagnostics.Debug.Assert(maxIndex - minIndex + 1 >= 0);
this.minIndex = minIndex;
this.maxIndex = maxIndex;
}
public abstract Antlr4.Runtime.Dfa.AbstractEdgeMap<T> Put(int key, T value);
IEdgeMap<T> IEdgeMap<T>.Put(int key, T value)
{
return Put(key, value);
}
public virtual Antlr4.Runtime.Dfa.AbstractEdgeMap<T> PutAll(IEdgeMap<T> m)
{
Antlr4.Runtime.Dfa.AbstractEdgeMap<T> result = this;
foreach (KeyValuePair<int, T> entry in m)
{
result = result.Put(entry.Key, entry.Value);
}
return result;
}
IEdgeMap<T> IEdgeMap<T>.PutAll(IEdgeMap<T> m)
{
return PutAll(m);
}
public abstract Antlr4.Runtime.Dfa.AbstractEdgeMap<T> Clear();
IEdgeMap<T> IEdgeMap<T>.Clear()
{
return Clear();
}
public abstract Antlr4.Runtime.Dfa.AbstractEdgeMap<T> Remove(int key);
IEdgeMap<T> IEdgeMap<T>.Remove(int key)
{
return Remove(key);
}
public abstract bool ContainsKey(int arg1);
public abstract T this[int arg1]
{
get;
}
public abstract bool IsEmpty
{
get;
}
public abstract int Count
{
get;
}
#if NET45PLUS
public abstract IReadOnlyDictionary<int, T> ToMap();
#else
public abstract IDictionary<int, T> ToMap();
#endif
public virtual IEnumerator<KeyValuePair<int, T>> GetEnumerator()
{
return ToMap().GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
}
}

View file

@ -0,0 +1,74 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Dfa
{
/// <summary>
/// Stores information about a
/// <see cref="DFAState"/>
/// which is an accept state under
/// some condition. Certain settings, such as
/// <see cref="Antlr4.Runtime.Atn.ParserATNSimulator.PredictionMode()"/>
/// , may be used in addition to
/// this information to determine whether or not a particular state is an accept
/// state.
/// </summary>
/// <author>Sam Harwell</author>
internal class AcceptStateInfo
{
private readonly int prediction;
private readonly Antlr4.Runtime.Atn.LexerActionExecutor lexerActionExecutor;
public AcceptStateInfo(int prediction)
{
this.prediction = prediction;
this.lexerActionExecutor = null;
}
public AcceptStateInfo(int prediction, Antlr4.Runtime.Atn.LexerActionExecutor lexerActionExecutor)
{
this.prediction = prediction;
this.lexerActionExecutor = lexerActionExecutor;
}
/// <summary>Gets the prediction made by this accept state.</summary>
/// <remarks>
/// Gets the prediction made by this accept state. Note that this value
/// assumes the predicates, if any, in the
/// <see cref="DFAState"/>
/// evaluate to
/// <see langword="true"/>
/// . If predicate evaluation is enabled, the final prediction of
/// the accept state will be determined by the result of predicate
/// evaluation.
/// </remarks>
public virtual int Prediction
{
get
{
return prediction;
}
}
/// <summary>
/// Gets the
/// <see cref="Antlr4.Runtime.Atn.LexerActionExecutor"/>
/// which can be used to execute actions
/// and/or commands after the lexer matches a token.
/// </summary>
public virtual Antlr4.Runtime.Atn.LexerActionExecutor LexerActionExecutor
{
get
{
return lexerActionExecutor;
}
}
}
}

View file

@ -0,0 +1,196 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Sharpen;
using Interlocked = System.Threading.Interlocked;
#if NET45PLUS
using Volatile = System.Threading.Volatile;
#elif !PORTABLE && !COMPACT
using Thread = System.Threading.Thread;
#endif
namespace Antlr4.Runtime.Dfa
{
/// <author>Sam Harwell</author>
internal sealed class ArrayEdgeMap<T> : AbstractEdgeMap<T>
where T : class
{
private readonly T[] arrayData;
private int size;
public ArrayEdgeMap(int minIndex, int maxIndex)
: base(minIndex, maxIndex)
{
arrayData = new T[maxIndex - minIndex + 1];
}
public override int Count
{
get
{
#if NET45PLUS
return Volatile.Read(ref size);
#elif !PORTABLE && !COMPACT
return Thread.VolatileRead(ref size);
#else
return Interlocked.CompareExchange(ref size, 0, 0);
#endif
}
}
public override bool IsEmpty
{
get
{
return Count == 0;
}
}
public override bool ContainsKey(int key)
{
return this[key] != null;
}
public override T this[int key]
{
get
{
if (key < minIndex || key > maxIndex)
{
return null;
}
#if NET45PLUS
return Volatile.Read(ref arrayData[key - minIndex]);
#else
return Interlocked.CompareExchange(ref arrayData[key - minIndex], null, null);
#endif
}
}
public override AbstractEdgeMap<T> Put(int key, T value)
{
if (key >= minIndex && key <= maxIndex)
{
T existing = Interlocked.Exchange(ref arrayData[key - minIndex], value);
if (existing == null && value != null)
{
Interlocked.Increment(ref size);
}
else
{
if (existing != null && value == null)
{
Interlocked.Decrement(ref size);
}
}
}
return this;
}
public override AbstractEdgeMap<T> Remove(int key)
{
return Put(key, null);
}
public override AbstractEdgeMap<T> PutAll(IEdgeMap<T> m)
{
if (m.IsEmpty)
{
return this;
}
if (m is Antlr4.Runtime.Dfa.ArrayEdgeMap<T>)
{
Antlr4.Runtime.Dfa.ArrayEdgeMap<T> other = (Antlr4.Runtime.Dfa.ArrayEdgeMap<T>)m;
int minOverlap = Math.Max(minIndex, other.minIndex);
int maxOverlap = Math.Min(maxIndex, other.maxIndex);
Antlr4.Runtime.Dfa.ArrayEdgeMap<T> result = this;
for (int i = minOverlap; i <= maxOverlap; i++)
{
result = ((Antlr4.Runtime.Dfa.ArrayEdgeMap<T>)result.Put(i, m[i]));
}
return result;
}
else
{
if (m is SingletonEdgeMap<T>)
{
SingletonEdgeMap<T> other = (SingletonEdgeMap<T>)m;
System.Diagnostics.Debug.Assert(!other.IsEmpty);
return Put(other.Key, other.Value);
}
else
{
if (m is SparseEdgeMap<T>)
{
SparseEdgeMap<T> other = (SparseEdgeMap<T>)m;
lock (other)
{
int[] keys = other.Keys;
IList<T> values = other.Values;
Antlr4.Runtime.Dfa.ArrayEdgeMap<T> result = this;
for (int i = 0; i < values.Count; i++)
{
result = ((Antlr4.Runtime.Dfa.ArrayEdgeMap<T>)result.Put(keys[i], values[i]));
}
return result;
}
}
else
{
throw new NotSupportedException(string.Format("EdgeMap of type {0} is not supported yet.", m.GetType().FullName));
}
}
}
}
public override AbstractEdgeMap<T> Clear()
{
return new EmptyEdgeMap<T>(minIndex, maxIndex);
}
#if NET45PLUS
public override IReadOnlyDictionary<int, T> ToMap()
#else
public override IDictionary<int, T> ToMap()
#endif
{
if (IsEmpty)
{
return Sharpen.Collections.EmptyMap<int, T>();
}
#if COMPACT
IDictionary<int, T> result = new SortedList<int, T>();
#elif PORTABLE && !NET45PLUS
IDictionary<int, T> result = new Dictionary<int, T>();
#else
IDictionary<int, T> result = new SortedDictionary<int, T>();
#endif
for (int i = 0; i < arrayData.Length; i++)
{
T element = arrayData[i];
if (element == null)
{
continue;
}
result[i + minIndex] = element;
}
#if NET45PLUS
return new ReadOnlyDictionary<int, T>(result);
#else
return result;
#endif
}
}
}

View file

@ -0,0 +1,173 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Dfa
{
internal class DFA
{
/** A set of all DFA states. Use {@link Map} so we can get old state back
* ({@link Set} only allows you to see if it's there).
*/
public Dictionary<DFAState, DFAState> states = new Dictionary<DFAState, DFAState>();
public DFAState s0;
public int decision;
/** From which ATN state did we create this DFA? */
public DecisionState atnStartState;
/**
* {@code true} if this DFA is for a precedence decision; otherwise,
* {@code false}. This is the backing field for {@link #isPrecedenceDfa}.
*/
private bool precedenceDfa;
public DFA(DecisionState atnStartState)
: this(atnStartState, 0)
{
}
public DFA(DecisionState atnStartState, int decision)
{
this.atnStartState = atnStartState;
this.decision = decision;
this.precedenceDfa = false;
if (atnStartState is StarLoopEntryState && ((StarLoopEntryState)atnStartState).isPrecedenceDecision)
{
this.precedenceDfa = true;
DFAState precedenceState = new DFAState(new ATNConfigSet());
precedenceState.edges = new DFAState[0];
precedenceState.isAcceptState = false;
precedenceState.requiresFullContext = false;
this.s0 = precedenceState;
}
}
/**
* Gets whether this DFA is a precedence DFA. Precedence DFAs use a special
* start state {@link #s0} which is not stored in {@link #states}. The
* {@link DFAState#edges} array for this start state contains outgoing edges
* supplying individual start states corresponding to specific precedence
* values.
*
* @return {@code true} if this is a precedence DFA; otherwise,
* {@code false}.
* @see Parser#getPrecedence()
*/
public bool IsPrecedenceDfa
{
get
{
return precedenceDfa;
}
}
/**
* Get the start state for a specific precedence value.
*
* @param precedence The current precedence.
* @return The start state corresponding to the specified precedence, or
* {@code null} if no start state exists for the specified precedence.
*
* @throws IllegalStateException if this is not a precedence DFA.
* @see #isPrecedenceDfa()
*/
public DFAState GetPrecedenceStartState(int precedence)
{
if (!IsPrecedenceDfa)
{
throw new Exception("Only precedence DFAs may contain a precedence start state.");
}
// s0.edges is never null for a precedence DFA
if (precedence < 0 || precedence >= s0.edges.Length)
{
return null;
}
return s0.edges[precedence];
}
/**
* Set the start state for a specific precedence value.
*
* @param precedence The current precedence.
* @param startState The start state corresponding to the specified
* precedence.
*
* @throws IllegalStateException if this is not a precedence DFA.
* @see #isPrecedenceDfa()
*/
public void SetPrecedenceStartState(int precedence, DFAState startState)
{
if (!IsPrecedenceDfa)
{
throw new Exception("Only precedence DFAs may contain a precedence start state.");
}
if (precedence < 0)
{
return;
}
// synchronization on s0 here is ok. when the DFA is turned into a
// precedence DFA, s0 will be initialized once and not updated again
lock (s0)
{
// s0.edges is never null for a precedence DFA
if (precedence >= s0.edges.Length)
{
s0.edges = Arrays.CopyOf(s0.edges, precedence + 1);
}
s0.edges[precedence] = startState;
}
}
/**
* Return a list of all states in this DFA, ordered by state number.
*/
public List<DFAState> GetStates()
{
List<DFAState> result = new List<DFAState>(states.Keys);
result.Sort((x, y) => x.stateNumber - y.stateNumber);
return result;
}
public override String ToString() { return ToString(Vocabulary.EmptyVocabulary); }
public String ToString(IVocabulary vocabulary)
{
if (s0 == null)
{
return "";
}
DFASerializer serializer = new DFASerializer(this, vocabulary);
return serializer.ToString();
}
public String ToLexerString()
{
if (s0 == null)
return "";
DFASerializer serializer = new LexerDFASerializer(this);
return serializer.ToString();
}
}
}
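// A minimal sketch of the precedence-DFA access pattern documented above: per-precedence
// start states exist only when IsPrecedenceDfa is true, and GetPrecedenceStartState throws
// otherwise. The helper class and method names here are hypothetical.
namespace Antlr4.Runtime.Dfa
{
    internal static class DfaStartStateSketch
    {
        internal static DFAState StartStateFor(DFA dfa, int precedence)
        {
            // For ordinary decisions the single start state s0 is used directly.
            return dfa.IsPrecedenceDfa ? dfa.GetPrecedenceStartState(precedence) : dfa.s0;
        }
    }
}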

View file

@ -0,0 +1,136 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Text;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Dfa
{
/// <summary>A DFA walker that knows how to dump them to serialized strings.</summary>
/// <remarks>A DFA walker that knows how to dump them to serialized strings.</remarks>
internal class DFASerializer
{
[NotNull]
private readonly DFA dfa;
[NotNull]
private readonly IVocabulary vocabulary;
[Nullable]
internal readonly string[] ruleNames;
[Nullable]
internal readonly ATN atn;
public DFASerializer(DFA dfa, IVocabulary vocabulary)
: this(dfa, vocabulary, null, null)
{
}
public DFASerializer(DFA dfa, IRecognizer parser)
: this(dfa, parser != null ? parser.Vocabulary : Vocabulary.EmptyVocabulary, parser != null ? parser.RuleNames : null, parser != null ? parser.Atn : null)
{
}
public DFASerializer(DFA dfa, IVocabulary vocabulary, string[] ruleNames, ATN atn)
{
this.dfa = dfa;
this.vocabulary = vocabulary;
this.ruleNames = ruleNames;
this.atn = atn;
}
public override string ToString()
{
if (dfa.s0 == null)
{
return null;
}
StringBuilder buf = new StringBuilder();
if (dfa.states != null)
{
List<DFAState> states = new List<DFAState>(dfa.states.Values);
states.Sort((x,y)=>x.stateNumber - y.stateNumber);
foreach (DFAState s in states)
{
int n = s.edges != null ? s.edges.Length : 0;
for (int i = 0; i < n; i++)
{
DFAState t = s.edges[i];
if (t != null && t.stateNumber != int.MaxValue)
{
buf.Append(GetStateString(s));
String label = GetEdgeLabel(i);
buf.Append("-");
buf.Append(label);
buf.Append("->");
buf.Append(GetStateString(t));
buf.Append('\n');
}
}
}
}
string output = buf.ToString();
if (output.Length == 0)
{
return null;
}
return output;
}
protected internal virtual string GetContextLabel(int i)
{
if (i == PredictionContext.EMPTY_RETURN_STATE)
{
return "ctx:EMPTY";
}
if (atn != null && i > 0 && i <= atn.states.Count)
{
ATNState state = atn.states[i];
int ruleIndex = state.ruleIndex;
if (ruleNames != null && ruleIndex >= 0 && ruleIndex < ruleNames.Length)
{
return "ctx:" + i.ToString() + "(" + ruleNames[ruleIndex] + ")";
}
}
return "ctx:" + i.ToString();
}
protected internal virtual string GetEdgeLabel(int i)
{
return vocabulary.GetDisplayName(i - 1);
}
internal virtual string GetStateString(DFAState s)
{
if (s == ATNSimulator.ERROR)
{
return "ERROR";
}
int n = s.stateNumber;
string baseStateStr = (s.isAcceptState ? ":" : "") + "s" + n + (s.requiresFullContext ? "^" : "");
if ( s.isAcceptState ) {
if ( s.predicates!=null ) {
return baseStateStr + "=>" + Arrays.ToString(s.predicates);
}
else {
return baseStateStr + "=>" + s.prediction;
}
}
else {
return baseStateStr;
}
}
}
}

View file

@ -0,0 +1,187 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Text;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Dfa
{
/// <summary>A DFA state represents a set of possible ATN configurations.</summary>
/// <remarks>
/// A DFA state represents a set of possible ATN configurations.
/// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
/// to keep track of all possible states the ATN can be in after
/// reading each input symbol. That is to say, after reading
/// input a1a2..an, the DFA is in a state that represents the
/// subset T of the states of the ATN that are reachable from the
/// ATN's start state along some path labeled a1a2..an."
/// In conventional NFA&#x2192;DFA conversion, therefore, the subset T
/// would be a bitset representing the set of states the
/// ATN could be in. We need to track the alt predicted by each
/// state as well, however. More importantly, we need to maintain
/// a stack of states, tracking the closure operations as they
/// jump from rule to rule, emulating rule invocations (method calls).
/// I have to add a stack to simulate the proper lookahead sequences for
/// the underlying LL grammar from which the ATN was derived.
/// <p>I use a set of ATNConfig objects not simple states. An ATNConfig
/// is both a state (ala normal conversion) and a RuleContext describing
/// the chain of rules (if any) followed to arrive at that state.</p>
/// <p>A DFA state may have multiple references to a particular state,
/// but with different ATN contexts (with same or different alts)
/// meaning that state was reached via a different set of rule invocations.</p>
/// </remarks>
internal class DFAState
{
public int stateNumber = -1;
public ATNConfigSet configSet = new ATNConfigSet();
/** {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
* {@link Token#EOF} maps to {@code edges[0]}.
*/
public DFAState[] edges;
public bool isAcceptState = false;
/** if accept state, what ttype do we match or alt do we predict?
* This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or
* {@link #requiresFullContext}.
*/
public int prediction;
public LexerActionExecutor lexerActionExecutor;
/**
* Indicates that this state was created during SLL prediction that
* discovered a conflict between the configurations in the state. Future
* {@link ParserATNSimulator#execATN} invocations immediately jump to
* full context prediction if this field is true.
*/
public bool requiresFullContext;
/** During SLL parsing, this is a list of predicates associated with the
* ATN configurations of the DFA state. When we have predicates,
* {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates
* on-the-fly. If this is not null, then {@link #prediction} is
* {@link ATN#INVALID_ALT_NUMBER}.
*
* <p>We only use these for non-{@link #requiresFullContext} but conflicting states. That
* means we know from the context (it's $ or we don't dip into outer
* context) that it's an ambiguity not a conflict.</p>
*
* <p>This list is computed by {@link ParserATNSimulator#predicateDFAState}.</p>
*/
public PredPrediction[] predicates;
public DFAState() { }
public DFAState(int stateNumber) { this.stateNumber = stateNumber; }
public DFAState(ATNConfigSet configs) { this.configSet = configs; }
/** Get the set of all alts mentioned by all ATN configurations in this
* DFA state.
*/
public HashSet<int> getAltSet()
{
HashSet<int> alts = new HashSet<int>();
if (configSet != null)
{
foreach (ATNConfig c in configSet.configs)
{
alts.Add(c.alt);
}
}
if (alts.Count==0)
return null;
return alts;
}
public override int GetHashCode()
{
int hash = MurmurHash.Initialize(7);
hash = MurmurHash.Update(hash, configSet.GetHashCode());
hash = MurmurHash.Finish(hash, 1);
return hash;
}
/**
* Two {@link DFAState} instances are equal if their ATN configuration sets
* are the same. This method is used to see if a state already exists.
*
* <p>Because the number of alternatives and number of ATN configurations are
* finite, there is a finite number of DFA states that can be processed.
* This is necessary to show that the algorithm terminates.</p>
*
* <p>Cannot test the DFA state numbers here because in
* {@link ParserATNSimulator#addDFAState} we need to know if any other state
* exists that has this exact set of ATN configurations. The
* {@link #stateNumber} is irrelevant.</p>
*/
public override bool Equals(Object o)
{
// compare set of ATN configurations in this set with other
if (this == o) return true;
if (!(o is DFAState))
{
return false;
}
DFAState other = (DFAState)o;
// TODO (sam): what to do when configs==null?
bool sameSet = this.configSet.Equals(other.configSet);
// System.out.println("DFAState.equals: "+configs+(sameSet?"==":"!=")+other.configs);
return sameSet;
}
public override String ToString()
{
StringBuilder buf = new StringBuilder();
buf.Append(stateNumber).Append(":").Append(configSet);
if (isAcceptState)
{
buf.Append("=>");
if (predicates != null)
{
buf.Append(Arrays.ToString(predicates));
}
else {
buf.Append(prediction);
}
}
return buf.ToString();
}
}
/** Map a predicate to a predicted alternative. */
internal class PredPrediction
{
public SemanticContext pred; // never null; at least SemanticContext.NONE
public int alt;
public PredPrediction(SemanticContext pred, int alt)
{
this.alt = alt;
this.pred = pred;
}
public override String ToString()
{
return "(" + pred + ", " + alt + ")";
}
}
}

View file

@ -0,0 +1,94 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections.Generic;
using Antlr4.Runtime.Sharpen;
#if NET45PLUS
using System.Collections.ObjectModel;
#endif
namespace Antlr4.Runtime.Dfa
{
/// <summary>
/// This implementation of
/// <see cref="AbstractEdgeMap{T}"/>
/// represents an empty edge map.
/// </summary>
/// <author>Sam Harwell</author>
internal sealed class EmptyEdgeMap<T> : AbstractEdgeMap<T>
where T : class
{
public EmptyEdgeMap(int minIndex, int maxIndex)
: base(minIndex, maxIndex)
{
}
public override AbstractEdgeMap<T> Put(int key, T value)
{
if (value == null || key < minIndex || key > maxIndex)
{
// remains empty
return this;
}
return new SingletonEdgeMap<T>(minIndex, maxIndex, key, value);
}
public override AbstractEdgeMap<T> Clear()
{
return this;
}
public override AbstractEdgeMap<T> Remove(int key)
{
return this;
}
public override int Count
{
get
{
return 0;
}
}
public override bool IsEmpty
{
get
{
return true;
}
}
public override bool ContainsKey(int key)
{
return false;
}
public override T this[int key]
{
get
{
return null;
}
}
#if NET45PLUS
public override IReadOnlyDictionary<int, T> ToMap()
#else
public override IDictionary<int, T> ToMap()
#endif
{
Dictionary<int, T> result = new Dictionary<int, T>();
#if NET45PLUS
return new ReadOnlyDictionary<int, T>(result);
#else
return result;
#endif
}
}
}

View file

@ -0,0 +1,53 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections.Generic;
using Antlr4.Runtime.Misc;
namespace Antlr4.Runtime.Dfa
{
/// <author>Sam Harwell</author>
internal interface IEdgeMap<T> : IEnumerable<KeyValuePair<int, T>>
{
int Count
{
get;
}
bool IsEmpty
{
get;
}
bool ContainsKey(int key);
T this[int key]
{
get;
}
[return: NotNull]
IEdgeMap<T> Put(int key, T value);
[return: NotNull]
IEdgeMap<T> Remove(int key);
[return: NotNull]
IEdgeMap<T> PutAll(IEdgeMap<T> m);
[return: NotNull]
IEdgeMap<T> Clear();
#if NET45PLUS
[return: NotNull]
IReadOnlyDictionary<int, T> ToMap();
#else
[return: NotNull]
IDictionary<int, T> ToMap();
#endif
}
}

View file

@ -0,0 +1,25 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime.Misc;
namespace Antlr4.Runtime.Dfa
{
internal class LexerDFASerializer : DFASerializer
{
public LexerDFASerializer(DFA dfa)
: base(dfa, Vocabulary.EmptyVocabulary)
{
}
[return: NotNull]
protected internal override string GetEdgeLabel(int i)
{
return "'" + (char)i + "'";
}
}
}

View file

@ -0,0 +1,142 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Collections.Generic;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Dfa
{
/// <author>Sam Harwell</author>
internal sealed class SingletonEdgeMap<T> : AbstractEdgeMap<T>
where T : class
{
private readonly int key;
private readonly T value;
public SingletonEdgeMap(int minIndex, int maxIndex, int key, T value)
: base(minIndex, maxIndex)
{
if (key >= minIndex && key <= maxIndex)
{
this.key = key;
this.value = value;
}
else
{
this.key = 0;
this.value = null;
}
}
public int Key
{
get
{
return key;
}
}
public T Value
{
get
{
return value;
}
}
public override int Count
{
get
{
return value != null ? 1 : 0;
}
}
public override bool IsEmpty
{
get
{
return value == null;
}
}
public override bool ContainsKey(int key)
{
return key == this.key && value != null;
}
public override T this[int key]
{
get
{
if (key == this.key)
{
return value;
}
return null;
}
}
public override AbstractEdgeMap<T> Put(int key, T value)
{
if (key < minIndex || key > maxIndex)
{
return this;
}
if (key == this.key || this.value == null)
{
return new Antlr4.Runtime.Dfa.SingletonEdgeMap<T>(minIndex, maxIndex, key, value);
}
else
{
if (value != null)
{
AbstractEdgeMap<T> result = new SparseEdgeMap<T>(minIndex, maxIndex);
result = result.Put(this.key, this.value);
result = result.Put(key, value);
return result;
}
else
{
return this;
}
}
}
public override AbstractEdgeMap<T> Remove(int key)
{
if (key == this.key && this.value != null)
{
return new EmptyEdgeMap<T>(minIndex, maxIndex);
}
return this;
}
public override AbstractEdgeMap<T> Clear()
{
if (this.value != null)
{
return new EmptyEdgeMap<T>(minIndex, maxIndex);
}
return this;
}
#if NET45PLUS
public override IReadOnlyDictionary<int, T> ToMap()
#else
public override IDictionary<int, T> ToMap()
#endif
{
if (IsEmpty)
{
return Sharpen.Collections.EmptyMap<int, T>();
}
return Antlr4.Runtime.Sharpen.Collections.SingletonMap(key, value);
}
}
}

View file

@ -0,0 +1,218 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime.Dfa
{
/// <author>Sam Harwell</author>
internal sealed class SparseEdgeMap<T> : AbstractEdgeMap<T>
where T : class
{
private const int DefaultMaxSize = 5;
private readonly int[] keys;
private readonly List<T> values;
public SparseEdgeMap(int minIndex, int maxIndex)
: this(minIndex, maxIndex, DefaultMaxSize)
{
}
public SparseEdgeMap(int minIndex, int maxIndex, int maxSparseSize)
: base(minIndex, maxIndex)
{
this.keys = new int[maxSparseSize];
this.values = new List<T>(maxSparseSize);
}
private SparseEdgeMap(Antlr4.Runtime.Dfa.SparseEdgeMap<T> map, int maxSparseSize)
: base(map.minIndex, map.maxIndex)
{
lock (map)
{
if (maxSparseSize < map.values.Count)
{
throw new ArgumentException();
}
keys = Arrays.CopyOf(map.keys, maxSparseSize);
values = new List<T>(maxSparseSize);
values.AddRange(map.Values);
}
}
public int[] Keys
{
get
{
return keys;
}
}
public IList<T> Values
{
get
{
return values;
}
}
public int MaxSparseSize
{
get
{
return keys.Length;
}
}
public override int Count
{
get
{
return values.Count;
}
}
public override bool IsEmpty
{
get
{
return values.Count == 0;
}
}
public override bool ContainsKey(int key)
{
return this[key] != null;
}
public override T this[int key]
{
get
{
// Special property of this collection: values are only ever added to
// the end, else a new object is returned from put(). Therefore no lock
// is required in this method.
int index = System.Array.BinarySearch(keys, 0, Count, key);
if (index < 0)
{
return null;
}
return values[index];
}
}
public override AbstractEdgeMap<T> Put(int key, T value)
{
if (key < minIndex || key > maxIndex)
{
return this;
}
if (value == null)
{
return Remove(key);
}
lock (this)
{
int index = System.Array.BinarySearch(keys, 0, Count, key);
if (index >= 0)
{
// replace existing entry
values[index] = value;
return this;
}
System.Diagnostics.Debug.Assert(index < 0 && value != null);
int insertIndex = -index - 1;
if (Count < MaxSparseSize && insertIndex == Count)
{
// stay sparse and add new entry
keys[insertIndex] = key;
values.Add(value);
return this;
}
int desiredSize = Count >= MaxSparseSize ? MaxSparseSize * 2 : MaxSparseSize;
int space = maxIndex - minIndex + 1;
// SparseEdgeMap only uses less memory than ArrayEdgeMap up to half the size of the symbol space
if (desiredSize >= space / 2)
{
ArrayEdgeMap<T> arrayMap = new ArrayEdgeMap<T>(minIndex, maxIndex);
arrayMap = ((ArrayEdgeMap<T>)arrayMap.PutAll(this));
arrayMap.Put(key, value);
return arrayMap;
}
else
{
Antlr4.Runtime.Dfa.SparseEdgeMap<T> resized = new Antlr4.Runtime.Dfa.SparseEdgeMap<T>(this, desiredSize);
System.Array.Copy(resized.keys, insertIndex, resized.keys, insertIndex + 1, Count - insertIndex);
resized.keys[insertIndex] = key;
resized.values.Insert(insertIndex, value);
return resized;
}
}
}
public override AbstractEdgeMap<T> Remove(int key)
{
lock (this)
{
int index = System.Array.BinarySearch(keys, 0, Count, key);
if (index < 0)
{
return this;
}
Antlr4.Runtime.Dfa.SparseEdgeMap<T> result = new Antlr4.Runtime.Dfa.SparseEdgeMap<T>(this, MaxSparseSize);
System.Array.Copy(result.keys, index + 1, result.keys, index, Count - index - 1);
result.values.RemoveAt(index);
return result;
}
}
public override AbstractEdgeMap<T> Clear()
{
if (IsEmpty)
{
return this;
}
return new EmptyEdgeMap<T>(minIndex, maxIndex);
}
#if NET45PLUS
public override IReadOnlyDictionary<int, T> ToMap()
#else
public override IDictionary<int, T> ToMap()
#endif
{
if (IsEmpty)
{
return Sharpen.Collections.EmptyMap<int, T>();
}
lock (this)
{
#if COMPACT
IDictionary<int, T> result = new SortedList<int, T>();
#elif PORTABLE && !NET45PLUS
IDictionary<int, T> result = new Dictionary<int, T>();
#else
IDictionary<int, T> result = new SortedDictionary<int, T>();
#endif
for (int i = 0; i < Count; i++)
{
result[keys[i]] = values[i];
}
#if NET45PLUS
return new ReadOnlyDictionary<int, T>(result);
#else
return result;
#endif
}
}
}
}
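// A minimal sketch of how the immutable edge-map variants in these files compose: Put() on
// an EmptyEdgeMap yields a SingletonEdgeMap, a second distinct key yields a SparseEdgeMap,
// and a sufficiently dense map is promoted to an ArrayEdgeMap by SparseEdgeMap.Put(). The
// class and method names below are illustrative only.
namespace Antlr4.Runtime.Dfa
{
    internal static class EdgeMapUsageSketch
    {
        internal static IEdgeMap<string> Build()
        {
            // Start empty over the key range [0, 9]; every Put returns the resulting map.
            AbstractEdgeMap<string> map = new EmptyEdgeMap<string>(0, 9);
            map = map.Put(2, "a");   // now a SingletonEdgeMap<string>
            map = map.Put(5, "b");   // now a SparseEdgeMap<string> holding both keys
            map = map.Put(7, null);  // null values are never stored; the map is unchanged
            return map;
        }
    }
}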

View file

@ -0,0 +1,166 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Dfa;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <summary>
/// This implementation of
/// <see cref="IAntlrErrorListener{Symbol}"/>
/// can be used to identify
/// certain potential correctness and performance problems in grammars. "Reports"
/// are made by calling
/// <see cref="Parser.NotifyErrorListeners(string)"/>
/// with the appropriate
/// message.
/// <ul>
/// <li><b>Ambiguities</b>: These are cases where more than one path through the
/// grammar can match the input.</li>
/// <li><b>Weak context sensitivity</b>: These are cases where full-context
/// prediction resolved an SLL conflict to a unique alternative which equaled the
/// minimum alternative of the SLL conflict.</li>
/// <li><b>Strong (forced) context sensitivity</b>: These are cases where the
/// full-context prediction resolved an SLL conflict to a unique alternative,
/// <em>and</em> the minimum alternative of the SLL conflict was found to not be
/// a truly viable alternative. Two-stage parsing cannot be used for inputs where
/// this situation occurs.</li>
/// </ul>
/// </summary>
/// <author>Sam Harwell</author>
internal class DiagnosticErrorListener : BaseErrorListener
{
/// <summary>
/// When
/// <see langword="true"/>
/// , only exactly known ambiguities are reported.
/// </summary>
protected internal readonly bool exactOnly;
/// <summary>
/// Initializes a new instance of
/// <see cref="DiagnosticErrorListener"/>
/// which only
/// reports exact ambiguities.
/// </summary>
public DiagnosticErrorListener()
: this(true)
{
}
/// <summary>
/// Initializes a new instance of
/// <see cref="DiagnosticErrorListener"/>
/// , specifying
/// whether all ambiguities or only exact ambiguities are reported.
/// </summary>
/// <param name="exactOnly">
///
/// <see langword="true"/>
/// to report only exact ambiguities, otherwise
/// <see langword="false"/>
/// to report all ambiguities.
/// </param>
public DiagnosticErrorListener(bool exactOnly)
{
this.exactOnly = exactOnly;
}
public override void ReportAmbiguity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, bool exact, BitSet ambigAlts, ATNConfigSet configs)
{
if (exactOnly && !exact)
{
return;
}
string format = "reportAmbiguity d={0}: ambigAlts={1}, input='{2}'";
string decision = GetDecisionDescription(recognizer, dfa);
BitSet conflictingAlts = GetConflictingAlts(ambigAlts, configs);
string text = ((ITokenStream)recognizer.InputStream).GetText(Interval.Of(startIndex, stopIndex));
string message = string.Format(format, decision, conflictingAlts, text);
recognizer.NotifyErrorListeners(message);
}
public override void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, SimulatorState conflictState)
{
string format = "reportAttemptingFullContext d={0}, input='{1}'";
string decision = GetDecisionDescription(recognizer, dfa);
string text = ((ITokenStream)recognizer.InputStream).GetText(Interval.Of(startIndex, stopIndex));
string message = string.Format(format, decision, text);
recognizer.NotifyErrorListeners(message);
}
public override void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, SimulatorState acceptState)
{
string format = "reportContextSensitivity d={0}, input='{1}'";
string decision = GetDecisionDescription(recognizer, dfa);
string text = ((ITokenStream)recognizer.InputStream).GetText(Interval.Of(startIndex, stopIndex));
string message = string.Format(format, decision, text);
recognizer.NotifyErrorListeners(message);
}
protected internal virtual string GetDecisionDescription(Parser recognizer, DFA dfa)
{
int decision = dfa.decision;
int ruleIndex = dfa.atnStartState.ruleIndex;
string[] ruleNames = recognizer.RuleNames;
if (ruleIndex < 0 || ruleIndex >= ruleNames.Length)
{
return decision.ToString();
}
string ruleName = ruleNames[ruleIndex];
if (string.IsNullOrEmpty(ruleName))
{
return decision.ToString();
}
return string.Format("{0} ({1})", decision, ruleName);
}
/// <summary>
/// Computes the set of conflicting or ambiguous alternatives from a
/// configuration set, if that information was not already provided by the
/// parser.
/// </summary>
/// <remarks>
/// Computes the set of conflicting or ambiguous alternatives from a
/// configuration set, if that information was not already provided by the
/// parser.
/// </remarks>
/// <param name="reportedAlts">
/// The set of conflicting or ambiguous alternatives, as
/// reported by the parser.
/// </param>
/// <param name="configSet">The conflicting or ambiguous configuration set.</param>
/// <returns>
/// Returns
/// <paramref name="reportedAlts"/>
/// if it is not
/// <see langword="null"/>
/// , otherwise
/// returns the set of alternatives represented in
/// <paramref name="configSet"/>
/// .
/// </returns>
[return: NotNull]
protected internal virtual BitSet GetConflictingAlts(BitSet reportedAlts, ATNConfigSet configSet)
{
if (reportedAlts != null)
{
return reportedAlts;
}
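// The parser did not supply the conflicting set; derive it from the alternatives
// present in the configuration set instead.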
BitSet result = new BitSet();
foreach (ATNConfig config in configSet.configs)
{
result.Set(config.alt);
}
return result;
}
}
}
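
A usage sketch, not part of this change set: the listener above is normally attached to a generated parser while debugging a grammar. QueryLexer, QueryParser, and query() below are hypothetical placeholders for any ANTLR-generated lexer/parser pair.

using Antlr4.Runtime;

internal static class GrammarDiagnostics
{
    // Hedged sketch — QueryLexer/QueryParser/query() are hypothetical generated members.
    public static void ParseWithDiagnostics(string input)
    {
        var parser = new QueryParser(new CommonTokenStream(new QueryLexer(new AntlrInputStream(input))));

        // Report only exact ambiguities; pass exactOnly: false to surface every conflict.
        parser.AddErrorListener(new DiagnosticErrorListener(exactOnly: true));

        parser.query(); // any start rule; reports arrive through Parser.NotifyErrorListeners
    }
}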

View file

@ -0,0 +1,98 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using System.Globalization;
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
namespace Antlr4.Runtime
{
/// <summary>A semantic predicate failed during validation.</summary>
/// <remarks>
/// A semantic predicate failed during validation. Validation of predicates
/// occurs when normally parsing the alternative just like matching a token.
/// Disambiguating predicate evaluation occurs when we test a predicate during
/// prediction.
/// </remarks>
[System.Serializable]
internal class FailedPredicateException : RecognitionException
{
private const long serialVersionUID = 5379330841495778709L;
private readonly int ruleIndex;
private readonly int predicateIndex;
private readonly string predicate;
public FailedPredicateException(Parser recognizer)
: this(recognizer, null)
{
}
public FailedPredicateException(Parser recognizer, string predicate)
: this(recognizer, predicate, null)
{
}
public FailedPredicateException(Parser recognizer, string predicate, string message)
: base(FormatMessage(predicate, message), recognizer, ((ITokenStream)recognizer.InputStream), recognizer.RuleContext)
{
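            // The failed predicate is the first transition out of the parser's current
            // ATN state; when it is a standard PredicateTransition, capture which rule
            // and predicate index triggered the failure, otherwise fall back to zero.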
ATNState s = recognizer.Interpreter.atn.states[recognizer.State];
AbstractPredicateTransition trans = (AbstractPredicateTransition)s.Transition(0);
if (trans is PredicateTransition)
{
this.ruleIndex = ((PredicateTransition)trans).ruleIndex;
this.predicateIndex = ((PredicateTransition)trans).predIndex;
}
else
{
this.ruleIndex = 0;
this.predicateIndex = 0;
}
this.predicate = predicate;
this.OffendingToken = recognizer.CurrentToken;
}
public virtual int RuleIndex
{
get
{
return ruleIndex;
}
}
public virtual int PredIndex
{
get
{
return predicateIndex;
}
}
[Nullable]
public virtual string Predicate
{
get
{
return predicate;
}
}
[return: NotNull]
private static string FormatMessage(string predicate, string message)
{
if (message != null)
{
return message;
}
return string.Format(CultureInfo.CurrentCulture, "failed predicate: {{{0}}}?", predicate);
}
}
}
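
A hedged illustration of how a failed predicate typically reaches calling code. BailErrorStrategy and ParseCanceledException are stock runtime types; QueryParser and limitClause() are hypothetical generated members guarded by a {...}? semantic predicate.

using System;
using Antlr4.Runtime;
using Antlr4.Runtime.Misc;

internal static class PredicateFailureExample
{
    public static void Run(QueryParser parser) // hypothetical generated parser
    {
        parser.ErrorHandler = new BailErrorStrategy(); // rethrow instead of recovering in-line
        try
        {
            parser.limitClause(); // hypothetical rule containing a {...}? predicate
        }
        catch (ParseCanceledException ex)
        {
            if (ex.InnerException is FailedPredicateException fpe)
            {
                Console.WriteLine($"Predicate {{{fpe.Predicate}}}? failed (rule index {fpe.RuleIndex})");
            }
        }
    }
}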

View file

@ -0,0 +1,64 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Sharpen;
using System.IO;
namespace Antlr4.Runtime
{
/// <summary>How to emit recognition errors.</summary>
/// <remarks>How to emit recognition errors.</remarks>
#if COMPACT
internal interface IAntlrErrorListener<TSymbol>
#else
internal interface IAntlrErrorListener<in TSymbol>
#endif
{
/// <summary>Upon syntax error, notify any interested parties.</summary>
/// <remarks>
/// Upon syntax error, notify any interested parties. This is not how to
/// recover from errors or compute error messages.
/// <see cref="IAntlrErrorStrategy"/>
/// specifies how to recover from syntax errors and how to compute error
/// messages. This listener's job is simply to emit a computed message,
/// though it has enough information to create its own message in many cases.
/// <p>The
/// <see cref="RecognitionException"/>
/// is non-null for all syntax errors except
/// when we discover mismatched token errors that we can recover from
/// in-line, without returning from the surrounding rule (via the single
/// token insertion and deletion mechanism).</p>
/// </remarks>
/// <param name="output">
/// Where the error should be written.
/// </param>
/// <param name="recognizer">
/// What parser got the error. From this
/// object, you can access the context as well
/// as the input stream.
/// </param>
/// <param name="offendingSymbol">
/// The offending token in the input token
/// stream, unless the recognizer is a lexer (in which case it is null). For a
/// no-viable-alternative error,
/// <paramref name="e"/>
/// contains the token at which we
/// started production for the decision.
/// </param>
/// <param name="line">The line number in the input where the error occurred.</param>
/// <param name="charPositionInLine">The character position within that line where the error occurred.</param>
/// <param name="msg">The message to emit.</param>
/// <param name="e">
/// The exception generated by the parser that led to
/// the reporting of an error. It is null in the case where
/// the parser was able to recover in line without exiting the
/// surrounding rule.
/// </param>
void SyntaxError(TextWriter output, IRecognizer recognizer, TSymbol offendingSymbol, int line, int charPositionInLine, string msg, RecognitionException e);
}
}
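
A minimal sketch of a listener implementation against the interface above, assuming the vendored BaseErrorListener mirrors this SyntaxError signature; collecting messages into a list is only one possible policy.

using System.Collections.Generic;
using System.IO;
using Antlr4.Runtime;

// Sketch: gather syntax errors instead of printing them. Register with
// parser.RemoveErrorListeners() followed by parser.AddErrorListener(new CollectingErrorListener()).
internal sealed class CollectingErrorListener : BaseErrorListener
{
    public List<string> Errors { get; } = new List<string>();

    public override void SyntaxError(TextWriter output, IRecognizer recognizer, IToken offendingSymbol,
        int line, int charPositionInLine, string msg, RecognitionException e)
    {
        Errors.Add($"line {line}:{charPositionInLine} {msg}");
    }
}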

View file

@ -0,0 +1,159 @@
// This file isn't generated, but this comment is necessary to exclude it from StyleCop analysis.
// <auto-generated/>
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
using Antlr4.Runtime;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
using System.IO;
namespace Antlr4.Runtime
{
/// <summary>
/// The interface for defining strategies to deal with syntax errors encountered
/// during a parse by ANTLR-generated parsers.
/// </summary>
/// <remarks>
/// The interface for defining strategies to deal with syntax errors encountered
/// during a parse by ANTLR-generated parsers. We distinguish between three
/// different kinds of errors:
/// <ul>
/// <li>The parser could not figure out which path to take in the ATN (none of
/// the available alternatives could possibly match)</li>
/// <li>The current input does not match what we were looking for</li>
/// <li>A predicate evaluated to false</li>
/// </ul>
/// Implementations of this interface report syntax errors by calling
/// <see cref="Parser.NotifyErrorListeners(string)"/>
/// .
/// <p>TODO: what to do about lexers</p>
/// </remarks>
internal interface IAntlrErrorStrategy
{
/// <summary>
/// Reset the error handler state for the specified
/// <paramref name="recognizer"/>
/// .
/// </summary>
/// <param name="recognizer">the parser instance</param>
void Reset(Parser recognizer);
/// <summary>
/// This method is called when an unexpected symbol is encountered during an
/// inline match operation, such as
/// <see cref="Parser.Match(int)"/>
/// . If the error
/// strategy successfully recovers from the match failure, this method
/// returns the
/// <see cref="IToken"/>
/// instance which should be treated as the
/// successful result of the match.
/// <p>Note that the calling code will not report an error if this method
/// returns successfully. The error strategy implementation is responsible
/// for calling
/// <see cref="Parser.NotifyErrorListeners(string)"/>
/// as appropriate.</p>
/// </summary>
/// <param name="recognizer">the parser instance</param>
/// <exception cref="RecognitionException">
/// if the error strategy was not able to
/// recover from the unexpected input symbol
/// </exception>
/// <exception cref="Antlr4.Runtime.RecognitionException"/>
[return: NotNull]
IToken RecoverInline(Parser recognizer);
/// <summary>
/// This method is called to recover from exception
/// <paramref name="e"/>
/// . This method is
/// called after
/// <see cref="ReportError(Parser, RecognitionException)"/>
/// by the default exception handler
/// generated for a rule method.
/// </summary>
/// <seealso cref="ReportError(Parser, RecognitionException)"/>
/// <param name="recognizer">the parser instance</param>
/// <param name="e">the recognition exception to recover from</param>
/// <exception cref="RecognitionException">
/// if the error strategy could not recover from
/// the recognition exception
/// </exception>
/// <exception cref="Antlr4.Runtime.RecognitionException"/>
void Recover(Parser recognizer, RecognitionException e);
/// <summary>
/// This method provides the error handler with an opportunity to handle
/// syntactic or semantic errors in the input stream before they result in a
/// <see cref="RecognitionException"/>
/// .
/// <p>The generated code currently contains calls to
/// <see cref="Sync(Parser)"/>
/// after
/// entering the decision state of a closure block (
/// <c>(...)*</c>
/// or
/// <c>(...)+</c>
/// ).</p>
/// <p>For an implementation based on Jim Idle's "magic sync" mechanism, see
/// <see cref="DefaultErrorStrategy.Sync(Parser)"/>
/// .</p>
/// </summary>
/// <seealso cref="DefaultErrorStrategy.Sync(Parser)"/>
/// <param name="recognizer">the parser instance</param>
/// <exception cref="RecognitionException">
/// if an error is detected by the error
/// strategy but cannot be automatically recovered at the current state in
/// the parsing process
/// </exception>
/// <exception cref="Antlr4.Runtime.RecognitionException"/>
void Sync(Parser recognizer);
/// <summary>
/// Tests whether or not
/// <paramref name="recognizer"/>
/// is in the process of recovering
/// from an error. In error recovery mode,
/// <see cref="Parser.Consume()"/>
/// adds
/// symbols to the parse tree by calling
/// <see cref="ParserRuleContext.AddErrorNode(IToken)"/>
/// instead of
/// <see cref="ParserRuleContext.AddChild(IToken)"/>
/// .
/// </summary>
/// <param name="recognizer">the parser instance</param>
/// <returns>
///
/// <see langword="true"/>
/// if the parser is currently recovering from a parse
/// error, otherwise
/// <see langword="false"/>
/// </returns>
bool InErrorRecoveryMode(Parser recognizer);
/// <summary>
/// This method is called when the parser successfully matches an input
/// symbol.
/// </summary>
/// <remarks>
/// This method is called when the parser successfully matches an input
/// symbol.
/// </remarks>
/// <param name="recognizer">the parser instance</param>
void ReportMatch(Parser recognizer);
/// <summary>
/// Report any kind of
/// <see cref="RecognitionException"/>
/// . This method is called by
/// the default exception handler generated for a rule method.
/// </summary>
/// <param name="recognizer">the parser instance</param>
/// <param name="e">the recognition exception to report</param>
void ReportError(Parser recognizer, RecognitionException e);
}
}
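
A hedged sketch of swapping strategies through Parser.ErrorHandler: fail fast with BailErrorStrategy, then retry with DefaultErrorStrategy (the recovering implementation referenced above) if the first pass throws. The generated QueryLexer/QueryParser/query() names are hypothetical, and exact reset members may differ between runtime forks.

using Antlr4.Runtime;
using Antlr4.Runtime.Misc;

internal static class TwoPassParsing
{
    // QueryLexer/QueryParser/query() are hypothetical generated members.
    public static void Parse(string text)
    {
        var tokens = new CommonTokenStream(new QueryLexer(new AntlrInputStream(text)));
        var parser = new QueryParser(tokens);

        parser.ErrorHandler = new BailErrorStrategy(); // no recovery, no listener reports
        try
        {
            parser.query();
        }
        catch (ParseCanceledException)
        {
            tokens.Seek(0);                                   // rewind the token stream
            parser.Reset();
            parser.ErrorHandler = new DefaultErrorStrategy(); // report and recover in-line
            parser.query();
        }
    }
}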

Some files were not shown because too many files changed in this diff.