# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.

from antlr4.IntervalSet import IntervalSet
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.atn.ATNType import ATNType
from antlr4.atn.ATNState import ATNState, DecisionState


class ATN(object):
    __slots__ = (
        'grammarType', 'maxTokenType', 'states', 'decisionToState',
        'ruleToStartState', 'ruleToStopState', 'modeNameToStartState',
        'ruleToTokenType', 'lexerActions', 'modeToStartState'
    )

    INVALID_ALT_NUMBER = 0

    # Used for runtime deserialization of ATNs from strings.
    def __init__(self, grammarType:ATNType, maxTokenType:int):
        # The type of the ATN.
        self.grammarType = grammarType
        # The maximum value for any symbol recognized by a transition in the ATN.
        self.maxTokenType = maxTokenType
        self.states = []
        # Each subrule/rule is a decision point and we must track them so we
        #  can go back later and build DFA predictors for them.  This includes
        #  all the rules, subrules, optional blocks, ()+, ()* etc...
        self.decisionToState = []
        # Maps from rule index to starting state number.
        self.ruleToStartState = []
        # Maps from rule index to stop state number.
        self.ruleToStopState = None
        self.modeNameToStartState = dict()
        # For lexer ATNs, this maps the rule index to the resulting token type.
        # For parser ATNs, this maps the rule index to the generated bypass token
        # type if the
        # {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
        # deserialization option was specified; otherwise, this is {@code null}.
        self.ruleToTokenType = None
        # For lexer ATNs, this is an array of {@link LexerAction} objects which may
        # be referenced by action transitions in the ATN.
        self.lexerActions = None
        self.modeToStartState = []
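
    # Note: user code does not normally build an ATN by hand. A deserialized
    # ATN is produced from the serialized form embedded in a generated
    # recognizer, roughly as sketched below; ATNDeserializer lives elsewhere
    # in this runtime and serializedATN() is emitted by the code generator,
    # so treat the exact call shape as an assumption rather than part of
    # this file:
    #
    #   from antlr4.atn.ATNDeserializer import ATNDeserializer
    #   atn = ATNDeserializer().deserialize(serializedATN())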

    # Compute the set of valid tokens that can occur starting in state {@code s}.
    #  If {@code ctx} is null, the set of tokens will not include what can follow
    #  the rule surrounding {@code s}. In other words, the set will be
    #  restricted to tokens reachable staying within {@code s}'s rule.
    def nextTokensInContext(self, s:ATNState, ctx:RuleContext):
        from antlr4.LL1Analyzer import LL1Analyzer
        anal = LL1Analyzer(self)
        return anal.LOOK(s, ctx=ctx)

    # Compute the set of valid tokens that can occur starting in {@code s} and
    # staying in same rule. {@link Token#EPSILON} is in set if we reach end of
    # rule.
    def nextTokensNoContext(self, s:ATNState):
        if s.nextTokenWithinRule is not None:
            return s.nextTokenWithinRule
        s.nextTokenWithinRule = self.nextTokensInContext(s, None)
        s.nextTokenWithinRule.readonly = True
        return s.nextTokenWithinRule

    def nextTokens(self, s:ATNState, ctx:RuleContext = None):
        if ctx is None:
            return self.nextTokensNoContext(s)
        else:
            return self.nextTokensInContext(s, ctx)
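
    # A minimal sketch of how the lookups above are typically used; the
    # recognizer attributes referenced (parser.state, parser._ctx) are
    # assumptions about the surrounding runtime, not defined in this file:
    #
    #   state = atn.states[parser.state]
    #   within_rule = atn.nextTokens(state)              # cached, rule-local
    #   in_context  = atn.nextTokens(state, parser._ctx) # adds follow tokens
    #   if Token.EPSILON in within_rule:
    #       pass  # the surrounding rule can be exited from this state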

    def addState(self, state:ATNState):
        if state is not None:
            state.atn = self
            state.stateNumber = len(self.states)
        self.states.append(state)

    def removeState(self, state:ATNState):
        self.states[state.stateNumber] = None # just free mem, don't shift states in list

    def defineDecisionState(self, s:DecisionState):
        self.decisionToState.append(s)
        s.decision = len(self.decisionToState)-1
        return s.decision

    def getDecisionState(self, decision:int):
        if len(self.decisionToState)==0:
            return None
        else:
            return self.decisionToState[decision]

    # Computes the set of input symbols which could follow ATN state number
    # {@code stateNumber} in the specified full {@code context}. This method
    # considers the complete parser context, but does not evaluate semantic
    # predicates (i.e. all predicates encountered during the calculation are
    # assumed true). If a path in the ATN exists from the starting state to the
    # {@link RuleStopState} of the outermost context without matching any
    # symbols, {@link Token#EOF} is added to the returned set.
    #
    # <p>If {@code context} is {@code null}, it is treated as
    # {@link ParserRuleContext#EMPTY}.</p>
    #
    # @param stateNumber the ATN state number
    # @param context the full parse context
    # @return The set of potentially valid input symbols which could follow the
    # specified state in the specified context.
    # @throws IllegalArgumentException if the ATN does not contain a state with
    # number {@code stateNumber}
    def getExpectedTokens(self, stateNumber:int, ctx:RuleContext):
        if stateNumber < 0 or stateNumber >= len(self.states):
            raise Exception("Invalid state number.")
        s = self.states[stateNumber]
        following = self.nextTokens(s)
        if Token.EPSILON not in following:
            return following
        expected = IntervalSet()
        expected.addSet(following)
        expected.removeOne(Token.EPSILON)
        while ctx is not None and ctx.invokingState >= 0 and Token.EPSILON in following:
            invokingState = self.states[ctx.invokingState]
            rt = invokingState.transitions[0]
            following = self.nextTokens(rt.followState)
            expected.addSet(following)
            expected.removeOne(Token.EPSILON)
            ctx = ctx.parentCtx
        if Token.EPSILON in following:
            expected.addOne(Token.EOF)
        return expected
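

# A usage sketch, not part of the runtime API: it shows how getExpectedTokens()
# above can back an "is this token acceptable here?" check. The attribute path
# parser._interp.atn and the attributes parser.state / parser._ctx are
# assumptions about how a generated Python3 recognizer is wired together.
def _expected_token_example(parser, tokenType:int):
    atn = parser._interp.atn                                    # assumed wiring
    expected = atn.getExpectedTokens(parser.state, parser._ctx)
    # IntervalSet supports membership tests, exactly as used above.
    return tokenType in expected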